diff --git a/.circleci/linters_runner.sh b/.circleci/analyze_non_packs_files.sh similarity index 95% rename from .circleci/linters_runner.sh rename to .circleci/analyze_non_packs_files.sh index 39507fa483c7..60b3fc088a36 100755 --- a/.circleci/linters_runner.sh +++ b/.circleci/analyze_non_packs_files.sh @@ -13,7 +13,7 @@ echo -e "Folders to be used for lint scan (used by pylint and mypy):\n${all_dirs python3 -m ruff $all_1_depth_dirs --select=E,F,PLC,PLE --ignore=PLC1901 || errors=$? -echo 'Linter exit code:' $errors +echo 'analyze non-packs files exit code:' $errors if [[ $errors -ne 0 ]]; then exit 1 fi diff --git a/.circleci/config.yml b/.circleci/config.yml index 2b0de97db736..cfbf911bda3b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -224,36 +224,10 @@ references: # poll for neo4j status until available while ! curl --fail http://127.0.0.1:7474 &> /dev/null; do sleep 1; done - ./.circleci/linters_runner.sh + ./.circleci/analyze_non_packs_files.sh ./.circleci/validate.sh - run_unit_testing_and_lint: &run_unit_testing_and_lint - run: - parameters: - dockerimageflag: - type: string - name: Run Unit Testing And Lint - Docker Image:<< parameters.dockerimageflag >> - when: always - no_output_timeout: 5h - command: | - if [[ "$(echo "$GCS_MARKET_BUCKET" | tr '[:upper:]' '[:lower:]')" != "marketplace-dist" ]]; then - echo "Skipping validations when uploading to a test bucket." - exit 0 - fi - - echo "demisto-sdk version: $(demisto-sdk --version)" - echo "mypy version: $(mypy --version)" - echo "flake8 py3 version: $(python3 -m flake8 --version)" - echo "bandit py3 version: $(python3 -m bandit --version 2>&1)" - echo "vulture py3 version: $(python3 -m vulture --version 2>&1)" - mkdir ./unit-tests - - neo4j start - # poll for neo4j status until available - while ! 
curl --fail http://127.0.0.1:7474 &> /dev/null; do sleep 1; done - - demisto-sdk lint -p 8 -g --test-xml ./unit-tests --log-path ./artifacts --failure-report ./artifacts --coverage-report $ARTIFACTS_FOLDER/coverage_report --docker-image << parameters.dockerimageflag >> --check-dependent-api-module - + # generate_coverage_reports: &generate_coverage_reports # run: # name: Generate coverage reports @@ -298,14 +272,6 @@ references: nightly_jobs: &nightly_jobs - Setup Environment: context: nightly_env - - Run Unit Testing And Lint: - context: nightly_env - requires: - - Setup Environment - matrix: - parameters: - dockerimageflag: [ "native:ga", "native:maintenance", "native:dev", "from-yml" ] - name: Run Unit Testing And Lint - Docker Image:<< matrix.dockerimageflag >> - Run Validations: requires: - Setup Environment @@ -324,25 +290,6 @@ jobs: - *get_contribution_pack - *persist_to_workspace - Run Unit Testing And Lint: - <<: *container_config - resource_class: large - <<: *environment - parameters: - dockerimageflag: - type: string - steps: - - *attach_workspace - - *remote_docker - - *install_build_dependencies - - *install_node_ci - - *install_neo4j - - *prepare_environment - - *run_unit_testing_and_lint - # - *generate_coverage_reports - - store_test_results: - path: ./unit-tests - - *store_artifacts Run Validations: <<: *container_config diff --git a/.circleci/git_pull_master_into_fork.sh b/.circleci/git_pull_master_into_fork.sh index 2e66e2f7cea3..8b851775a464 100755 --- a/.circleci/git_pull_master_into_fork.sh +++ b/.circleci/git_pull_master_into_fork.sh @@ -1,7 +1,9 @@ #!/usr/bin/env bash - +# this file has been deprecated and relocated to the contribution/utils directory #Be aware, only contributors should run this script. +echo "This file has been deprecated and relocated to the contribution/utils directory" + CONTENT_URL='https://github.com/demisto/content.git' if [ -z "$1" ] diff --git a/.circleci/is_file_up_to_date.sh b/.circleci/is_file_up_to_date.sh index 04986c75c84f..53e3d4547585 100755 --- a/.circleci/is_file_up_to_date.sh +++ b/.circleci/is_file_up_to_date.sh @@ -31,7 +31,7 @@ if [[ $(git diff origin/master -G"." 
-- ${FILE_TO_CHECK}) ]]; then fi if [[ $BRANCH =~ pull/[0-9]+ ]]; then - echo "Run ./.circleci/git_pull_master_into_fork.sh or merge manually from upstream demisto content" + echo "Run ./contribution/utils/git_pull_master_into_fork.sh or merge manually from upstream demisto content" fi exit 1 diff --git a/.circleci/update_contribution_pack_in_base_branch.py b/.circleci/update_contribution_pack_in_base_branch.py index 669d8b9a1256..94047c30d035 100755 --- a/.circleci/update_contribution_pack_in_base_branch.py +++ b/.circleci/update_contribution_pack_in_base_branch.py @@ -25,7 +25,7 @@ def main(): github_token = args.github_token print( - f"args received in Utils/update_contribution_pack_in_base_branch.py script: {pr_number=}, {username=}, {repo=}, {branch=}" + f"args received in .circleci/update_contribution_pack_in_base_branch.py script: {pr_number=}, {username=}, {repo=}, {branch=}" ) packs_dir_names = get_files_from_github( diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 23115520c85e..68c3565d82db 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,7 +9,7 @@ /Tests/Marketplace/corepacks_override.json @yaakovpraisler # Docker native image -/Tests/docker_native_image_config.json @JudahSchwartz @samuelFain +/Tests/docker_native_image_config.json @JudahSchwartz @shmuel44 # Marketplace & Upload-Flow /Tests/scripts/create_artifacts_graph/create_artifacts.py @RosenbergYehuda diff --git a/.github/content_roles.json b/.github/content_roles.json index 7151244b0281..239cd8ad4b26 100644 --- a/.github/content_roles.json +++ b/.github/content_roles.json @@ -1,14 +1,14 @@ { "CONTRIBUTION_REVIEWERS": [ - "RotemAmit", - "jlevypaloalto", - "Shellyber" + "barryyosi-panw", + "samuelFain", + "israelpoli" ], - "CONTRIBUTION_TL": "BEAdi", + "CONTRIBUTION_TL": "thefrieddan1", "CONTRIBUTION_SECURITY_REVIEWER": "ssokolovich", "ON_CALL_DEVS": [ - "acarmi", - "ypreisler" + "skidorball", + "ayousef" ], "DOC_REVIEWER": "ShirleyDenkberg", "TIM_REVIEWER": "MLainer1" diff --git a/Utils/github_workflow_scripts/autobump_release_notes/autobump_rn.py b/.github/github_workflow_scripts/autobump_rn.py similarity index 98% rename from Utils/github_workflow_scripts/autobump_release_notes/autobump_rn.py rename to .github/github_workflow_scripts/autobump_rn.py index 544231b911b0..6e5f99ddac04 100644 --- a/Utils/github_workflow_scripts/autobump_release_notes/autobump_rn.py +++ b/.github/github_workflow_scripts/autobump_rn.py @@ -8,11 +8,11 @@ from github.PullRequest import PullRequest from github.Repository import Repository import sys -from Utils.github_workflow_scripts.autobump_release_notes.skip_conditions import MetadataCondition, \ +from skip_conditions import MetadataCondition, \ LastModifiedCondition, LabelCondition, AddedRNFilesCondition, HasConflictOnAllowedFilesCondition, \ PackSupportCondition, MajorChangeCondition, MaxVersionCondition, OnlyVersionChangedCondition, \ OnlyOneRNPerPackCondition, SameRNMetadataVersionCondition, AllowedBumpCondition, UpdateType -from Utils.github_workflow_scripts.utils import timestamped_print, Checkout +from utils import timestamped_print, Checkout from git import Repo from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN import os @@ -192,6 +192,10 @@ def manage(self): for pr in self.github_repo_obj.get_pulls( state="open", sort="created", base=BASE ): + if pr.draft: + # The bot does not go through a PR that is in draft + continue + print( f"{t.yellow}Looking on pr number [{pr.number}]: last updated: " f"{str(pr.updated_at)}, branch={pr.head.ref}" 
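The autobump_rn.py hunk above adds a guard inside manage() that skips pull requests still in draft before any release-notes bumping is attempted. A minimal, illustrative sketch of the same filtering pattern with PyGithub follows; the token placeholder, repository name, and base branch are assumptions for the example, not values taken from this diff.

```python
from github import Github

gh = Github("<github-token>")          # assumed placeholder token
repo = gh.get_repo("demisto/content")  # assumed repository for the example
for pr in repo.get_pulls(state="open", sort="created", base="master"):
    if pr.draft:
        # Draft PRs are skipped, mirroring the new guard in manage().
        continue
    print(f"Handling PR #{pr.number}: branch={pr.head.ref}, updated={pr.updated_at}")
```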
diff --git a/Utils/github_workflow_scripts/check_if_needs_to_fill_contribution_form.py b/.github/github_workflow_scripts/check_if_needs_to_fill_contribution_form.py similarity index 100% rename from Utils/github_workflow_scripts/check_if_needs_to_fill_contribution_form.py rename to .github/github_workflow_scripts/check_if_needs_to_fill_contribution_form.py diff --git a/Utils/github_workflow_scripts/check_if_partner_approved_label_exists.py b/.github/github_workflow_scripts/check_if_partner_approved_label_exists.py similarity index 97% rename from Utils/github_workflow_scripts/check_if_partner_approved_label_exists.py rename to .github/github_workflow_scripts/check_if_partner_approved_label_exists.py index cc2a16a570f1..e43864133893 100755 --- a/Utils/github_workflow_scripts/check_if_partner_approved_label_exists.py +++ b/.github/github_workflow_scripts/check_if_partner_approved_label_exists.py @@ -7,7 +7,7 @@ from github.Repository import Repository from github.PullRequest import PullRequest from demisto_sdk.commands.common.tools import get_pack_name -from Utils.github_workflow_scripts.utils import timestamped_print, get_support_level +from utils import timestamped_print, get_support_level urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) print = timestamped_print diff --git a/Utils/github_workflow_scripts/check_protected_directories.py b/.github/github_workflow_scripts/check_protected_directories.py similarity index 100% rename from Utils/github_workflow_scripts/check_protected_directories.py rename to .github/github_workflow_scripts/check_protected_directories.py diff --git a/Utils/github_workflow_scripts/create_internal_pr.py b/.github/github_workflow_scripts/create_internal_pr.py similarity index 94% rename from Utils/github_workflow_scripts/create_internal_pr.py rename to .github/github_workflow_scripts/create_internal_pr.py index cbef3a8ba486..40e2ed5f7e2a 100755 --- a/Utils/github_workflow_scripts/create_internal_pr.py +++ b/.github/github_workflow_scripts/create_internal_pr.py @@ -7,12 +7,11 @@ from github import Github from handle_external_pr import EXTERNAL_LABEL -from Utils.github_workflow_scripts.utils import ( +from utils import ( get_env_var, timestamped_print, - load_json, get_doc_reviewer, - CONTENT_ROLES_PATH + get_content_roles ) from urllib3.exceptions import InsecureRequestWarning @@ -81,7 +80,7 @@ def main(): assignees = [assignee.login for assignee in merged_pr.assignees] # Un-assign the tech writer (cause the docs reviewed has already been done on the external PR) - content_roles = load_json(CONTENT_ROLES_PATH) + content_roles = get_content_roles() if content_roles: try: @@ -96,7 +95,7 @@ def main(): print(f"{str(ve)}. Skipped tech writer unassignment.") else: - print(f"Unable to parse JSON from '{CONTENT_ROLES_PATH}'. Skipping tech writer unassignment.") + print("Unable to get content roles. 
Skipping tech writer unassignment...") pr.add_to_assignees(*assignees) print(f'{t.cyan}Assigned users {assignees}{t.normal}') diff --git a/Utils/github_workflow_scripts/github_workflow_scripts_tests/autobump_rn_test.py b/.github/github_workflow_scripts/github_workflow_scripts_tests/autobump_rn_test.py similarity index 97% rename from Utils/github_workflow_scripts/github_workflow_scripts_tests/autobump_rn_test.py rename to .github/github_workflow_scripts/github_workflow_scripts_tests/autobump_rn_test.py index b3b3cbfd1da2..8a63d61f229b 100644 --- a/Utils/github_workflow_scripts/github_workflow_scripts_tests/autobump_rn_test.py +++ b/.github/github_workflow_scripts/github_workflow_scripts_tests/autobump_rn_test.py @@ -5,17 +5,17 @@ from typing import Optional, List from unittest.mock import MagicMock import pytest -from Utils.github_workflow_scripts.autobump_release_notes.autobump_rn import ( +from github_workflow_scripts.autobump_release_notes.autobump_rn import ( PackAutoBumper, BranchAutoBumper, AutoBumperManager, ) -from Utils.github_workflow_scripts.autobump_release_notes.skip_conditions import ConditionResult, MetadataCondition, \ +from github_workflow_scripts.autobump_release_notes.skip_conditions import ConditionResult, MetadataCondition, \ LastModifiedCondition, LabelCondition, AddedRNFilesCondition, HasConflictOnAllowedFilesCondition, \ PackSupportCondition, MajorChangeCondition, MaxVersionCondition, OnlyVersionChangedCondition, \ OnlyOneRNPerPackCondition, SameRNMetadataVersionCondition, AllowedBumpCondition, UpdateType from git import GitCommandError from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN -from Utils.github_workflow_scripts.autobump_release_notes import skip_conditions +from github_workflow_scripts.autobump_release_notes import skip_conditions MERGE_STDOUT = "stdout: '\n Auto-merging {}\n failed.\n Auto-merging {}\n failed.\n" @@ -163,7 +163,7 @@ def test_get_metadata_files(mocker): origin_metadata = {"name": "MyPack", "currentVersion": "1.0.5"} branch_metadata = {"name": "MyPack", "currentVersion": "1.0.4"} base_metadata = {"name": "MyPack", "currentVersion": "1.0.3"} - mocker.patch("Utils.github_workflow_scripts.autobump_release_notes.skip_conditions.Checkout") + mocker.patch("github_workflow_scripts.autobump_release_notes.skip_conditions.Checkout") mocker.patch.object( skip_conditions, "load_json", @@ -614,7 +614,7 @@ def test_branch_auto_bumper(mocker): pack_auto_bumper = MagicMock() pack_auto_bumper.autobump.return_value = "1.0.2" pack_auto_bumper.pack_id = "MyPack" - mocker.patch("Utils.github_workflow_scripts.autobump_release_notes.autobump_rn.Checkout") + mocker.patch("github_workflow_scripts.autobump_release_notes.autobump_rn.Checkout") branch_auto_bumper = BranchAutoBumper( pr=PullRequest(), git_repo=Repo(), @@ -647,7 +647,7 @@ def test_autobump_manager(mocker): git_repo_obj=Repo(files=CHANGED_FILES), run_id="1", ) - mocker.patch("Utils.github_workflow_scripts.autobump_release_notes.autobump_rn.Checkout") + mocker.patch("github_workflow_scripts.autobump_release_notes.autobump_rn.Checkout") mocker.patch.object(BranchAutoBumper, "autobump") mocker.patch.object( MetadataCondition, diff --git a/Utils/github_workflow_scripts/github_workflow_scripts_tests/check_if_partner_approved_label_test.py b/.github/github_workflow_scripts/github_workflow_scripts_tests/check_if_partner_approved_label_test.py similarity index 62% rename from Utils/github_workflow_scripts/github_workflow_scripts_tests/check_if_partner_approved_label_test.py rename to 
.github/github_workflow_scripts/github_workflow_scripts_tests/check_if_partner_approved_label_test.py index ed82273095af..7bb829c3ddc7 100644 --- a/Utils/github_workflow_scripts/github_workflow_scripts_tests/check_if_partner_approved_label_test.py +++ b/.github/github_workflow_scripts/github_workflow_scripts_tests/check_if_partner_approved_label_test.py @@ -4,11 +4,11 @@ @pytest.mark.parametrize( 'support_levels, expected_support_level', [ ( - ['Utils/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack1/pack_metadata.json'], + ['.github/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack1/pack_metadata.json'], {'xsoar'} ), ( - ['Utils/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack2/pack_metadata.json'], + ['.github/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack2/pack_metadata.json'], {'partner'} ), ] @@ -24,5 +24,5 @@ def test_get_support_level(support_levels, expected_support_level): Then: - make sure the highest support level is always returned """ - from Utils.github_workflow_scripts.check_if_partner_approved_label_exists import get_support_level + from github_workflow_scripts.check_if_partner_approved_label_exists import get_support_level assert get_support_level(support_levels) == expected_support_level diff --git a/Utils/github_workflow_scripts/github_workflow_scripts_tests/handle_external_pr_test.py b/.github/github_workflow_scripts/github_workflow_scripts_tests/handle_external_pr_test.py similarity index 91% rename from Utils/github_workflow_scripts/github_workflow_scripts_tests/handle_external_pr_test.py rename to .github/github_workflow_scripts/github_workflow_scripts_tests/handle_external_pr_test.py index d0d7bb6c80fb..0cd539fd3949 100644 --- a/Utils/github_workflow_scripts/github_workflow_scripts_tests/handle_external_pr_test.py +++ b/.github/github_workflow_scripts/github_workflow_scripts_tests/handle_external_pr_test.py @@ -1,7 +1,7 @@ import os import pytest from typing import Final -from Utils.github_workflow_scripts.handle_external_pr import is_requires_security_reviewer, get_location_of_reviewer +from github_workflow_scripts.handle_external_pr import is_requires_security_reviewer, get_location_of_reviewer INTEGRATION_PATH: Final[str] = 'Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py' @@ -68,7 +68,7 @@ def test_get_highest_support_label(support_levels, expected_support_level): Then: - make sure the highest support level is always returned """ - from Utils.github_workflow_scripts.handle_external_pr import get_highest_support_label + from github_workflow_scripts.handle_external_pr import get_highest_support_label assert get_highest_support_label(support_levels) == expected_support_level @@ -90,15 +90,15 @@ def test_get_packs_support_level_label(mocker, fork_owner, expected_fork_owner): - make sure correct support label is returned. - fork owner that is being delivered to the Checkout branch is correct. 
""" - from Utils.github_workflow_scripts.handle_external_pr import get_packs_support_level_label, Checkout - from Utils.github_workflow_scripts.utils import ChangeCWD + from github_workflow_scripts.handle_external_pr import get_packs_support_level_label, Checkout + from github_workflow_scripts.utils import ChangeCWD checkout_mocker = mocker.patch.object(Checkout, '__init__', return_value=None) mocker.patch.object(Checkout, '__enter__', return_value=None) mocker.patch.object(Checkout, '__exit__', return_value=None) mocker.patch.object(os, 'getenv', return_value=fork_owner) - with ChangeCWD('Utils/github_workflow_scripts/github_workflow_scripts_tests/test_files'): + with ChangeCWD('.github/github_workflow_scripts/github_workflow_scripts_tests/test_files'): assert get_packs_support_level_label( file_paths=['Packs/Pack1/pack_metadata.json'], external_pr_branch='test' ) == 'Xsoar Support Level' @@ -117,12 +117,12 @@ def test_get_packs_support_level_label_checkout_failed(mocker): Then: - make sure correct support label is still returned. """ - from Utils.github_workflow_scripts.handle_external_pr import get_packs_support_level_label, Checkout - from Utils.github_workflow_scripts.utils import ChangeCWD + from github_workflow_scripts.handle_external_pr import get_packs_support_level_label, Checkout + from github_workflow_scripts.utils import ChangeCWD mocker.patch.object(Checkout, '__init__', return_value=Exception('Error')) - with ChangeCWD('Utils/github_workflow_scripts/github_workflow_scripts_tests/test_files'): + with ChangeCWD('.github/github_workflow_scripts/github_workflow_scripts_tests/test_files'): assert get_packs_support_level_label( file_paths=['Packs/Pack1/pack_metadata.json'], external_pr_branch='test' ) == 'Xsoar Support Level' diff --git a/Utils/github_workflow_scripts/github_workflow_scripts_tests/link_pr_to_jira_issue_test.py b/.github/github_workflow_scripts/github_workflow_scripts_tests/link_pr_to_jira_issue_test.py similarity index 98% rename from Utils/github_workflow_scripts/github_workflow_scripts_tests/link_pr_to_jira_issue_test.py rename to .github/github_workflow_scripts/github_workflow_scripts_tests/link_pr_to_jira_issue_test.py index 6dcad2c0e7c2..09e59a14f56d 100644 --- a/Utils/github_workflow_scripts/github_workflow_scripts_tests/link_pr_to_jira_issue_test.py +++ b/.github/github_workflow_scripts/github_workflow_scripts_tests/link_pr_to_jira_issue_test.py @@ -1,6 +1,6 @@ import pytest -from Utils.github_workflow_scripts.jira_integration_scripts import link_pr_to_jira_issue +from github_workflow_scripts.jira_integration_scripts import link_pr_to_jira_issue PR_WITH_ONLY_FIXES_WITH_SPACE = """This pr is dummy fixes: https://jira-hq.paloaltonetworks.local/browse/CIAC-3473 somthing diff --git a/Utils/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack1/pack_metadata.json b/.github/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack1/pack_metadata.json similarity index 100% rename from Utils/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack1/pack_metadata.json rename to .github/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack1/pack_metadata.json diff --git a/Utils/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack2/pack_metadata.json b/.github/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack2/pack_metadata.json similarity index 100% rename from 
Utils/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack2/pack_metadata.json rename to .github/github_workflow_scripts/github_workflow_scripts_tests/test_files/Packs/Pack2/pack_metadata.json diff --git a/Utils/github_workflow_scripts/handle_external_pr.py b/.github/github_workflow_scripts/handle_external_pr.py similarity index 79% rename from Utils/github_workflow_scripts/handle_external_pr.py rename to .github/github_workflow_scripts/handle_external_pr.py index e5f0f9ef5637..058b9a8f9ced 100755 --- a/Utils/github_workflow_scripts/handle_external_pr.py +++ b/.github/github_workflow_scripts/handle_external_pr.py @@ -1,28 +1,28 @@ #!/usr/bin/env python3 import json -import os from pathlib import Path +import sys import urllib3 from blessings import Terminal from github import Github from git import Repo from github.PullRequest import PullRequest from github.Repository import Repository -from demisto_sdk.commands.common.tools import get_pack_metadata +from demisto_sdk.commands.common.tools import get_pack_metadata, get_yaml from demisto_sdk.commands.content_graph.objects.base_content import BaseContent from demisto_sdk.commands.content_graph.objects.integration import Integration from demisto_sdk.commands.common.content_constant_paths import CONTENT_PATH from random import randint import re -from Utils.github_workflow_scripts.utils import ( +from utils import ( get_env_var, timestamped_print, Checkout, - load_json, get_content_reviewers, - CONTENT_ROLES_PATH, - get_support_level + get_support_level, + get_content_roles, + get_metadata ) from demisto_sdk.commands.common.tools import get_pack_name from urllib3.exceptions import InsecureRequestWarning @@ -167,7 +167,8 @@ def packs_to_check_in_pr(file_paths: list[str]) -> set: return pack_dirs_to_check -def get_packs_support_level_label(file_paths: list[str], external_pr_branch: str, repo_name: str = 'content') -> str: +def get_packs_support_level_label(file_paths: list[str], external_pr_branch: str, remote_fork_owner: str, + repo_name: str = 'content') -> str: """ Get The contributions' support level label. @@ -183,6 +184,7 @@ def get_packs_support_level_label(file_paths: list[str], external_pr_branch: str Args: file_paths(str): file paths external_pr_branch (str): the branch of the external PR. 
+ remote_fork_owner (str): the owner of the remote fork repo_name(str): the name of the forked repo (without the owner) Returns: @@ -198,12 +200,11 @@ def get_packs_support_level_label(file_paths: list[str], external_pr_branch: str f'to retrieve support level of {pack_dirs_to_check_support_levels_labels}' ) try: - fork_owner = os.getenv('GITHUB_ACTOR') with Checkout( repo=Repo(Path().cwd(), search_parent_directories=True), branch_to_checkout=external_pr_branch, # in marketplace contributions the name of the owner should be xsoar-contrib - fork_owner=fork_owner if fork_owner != 'xsoar-bot' else 'xsoar-contrib', + fork_owner=remote_fork_owner if remote_fork_owner != 'xsoar-bot' else 'xsoar-contrib', repo_name=repo_name ): packs_support_levels = get_support_level(pack_dirs_to_check_support_levels_labels) @@ -253,35 +254,96 @@ def is_requires_security_reviewer(pr_files: list[str]) -> bool: return False -def is_tim_content(pr_files: list[str]) -> bool: +def check_if_item_is_tim(content_object: BaseContent | None) -> bool: """ - This is where the actual search for feed:True or relevant tags or categories are being searched - according to the login in is_tim_reviewer_needed + Checks whether a given object (graph object) is a feed or otherwise related to TIM Arguments: - - pr_files: List[str] The list of files changed in the Pull Request. + - `content_object`: ``BaseContent``: Content object taken from the graph - Returns: returns True or False if tim reviewer needed + Returns: `bool` whether the content object is a feed or has the relevant tags/categories """ - integrations_checked = [] - for file in pr_files: - if 'CONTRIBUTORS.json' in file: - continue - integration = BaseContent.from_path(CONTENT_PATH / file) - if not isinstance(integration, Integration) or integration.path in integrations_checked: - continue - integrations_checked.append(integration.path) - if integration.is_feed: - return True - pack = integration.in_pack + if isinstance(content_object, Integration) and content_object.is_feed: + return True + try: + pack = content_object.in_pack # type: ignore tags = pack.tags categories = pack.categories if TIM_TAGS in tags or TIM_CATEGORIES in categories: return True + except Exception as er: + print(f"The pack is not TIM: {er}") + finally: + return False + + +def check_files_of_pr_manually(pr_files: list[str]) -> bool: + """ + If the checkout of the branch has failed, this function goes over the changed files and checks whether the contribution + needs to be reviewed by a TIM owner + + Arguments: + - `pr_files`: ``List[str]``: The list of files changed in the Pull Request. Used to determine + whether a TIM reviewer is required for the review.
+ + Returns: `bool` whether a TIM reviewer should be assigned + """ + pack_dirs_to_check = packs_to_check_in_pr(pr_files) + pack_metadata_list = get_metadata(pack_dirs_to_check) + for file in pr_files: + if "yml" in file and "Integrations" in file: + content_yml = get_yaml(file_path=file) + is_feed = content_yml.get("script").get("feed", "False") + print(f'Is it a feed: {is_feed}') + if is_feed: + return True + for pack_metadata in pack_metadata_list: + print(f'the metadata is: {pack_metadata}') + tags = pack_metadata.get("tags") + categories = pack_metadata.get("categories") + if TIM_TAGS in tags or TIM_CATEGORIES in categories: # type: ignore + return True + return False + + +def is_tim_content(pr_files: list[str], external_pr_branch: str, remote_fork_owner: str, repo_name: str) -> bool: + """ + Checks whether a TIM reviewer is needed when the pack is new and not yet part of master. + The remote branch is checked out first and its content is then inspected. + + Arguments: + - `pr_files`: ``List[str]``: The list of files changed in the Pull Request. Used to determine + whether a TIM reviewer is required for the review. + - 'external_pr_branch' (str) : name of the branch to checkout + - 'remote_fork_owner' (str) : name of the remote fork owner used for the checkout + - 'repo_name' (str) : name of the repository + + Returns: `bool` whether a TIM reviewer should be assigned + """ + try: + with Checkout( + repo=Repo(Path().cwd(), search_parent_directories=True), + branch_to_checkout=external_pr_branch, + # in marketplace contributions the name of the owner should be xsoar-contrib + fork_owner=remote_fork_owner if remote_fork_owner != 'xsoar-bot' else 'xsoar-contrib', + repo_name=repo_name + ): + for file in pr_files: + if 'CONTRIBUTORS.json' in file or 'Author_image' in file or 'README.md' in file or ".pack-ignore" in file: + continue + content_object = BaseContent.from_path(CONTENT_PATH / file) + is_tim_needed = check_if_item_is_tim(content_object) + if is_tim_needed: + return True + except Exception as er: + print(f"couldn't checkout branch to get metadata, error is {er}") + # if the checkout didn't work for any reason, try to go over the files manually + return check_files_of_pr_manually(pr_files) return False -def is_tim_reviewer_needed(pr_files: list[str], support_label: str) -> bool: +def is_tim_reviewer_needed(pr_files: list[str], support_label: str, external_pr_branch: str, + remote_fork_owner: str, repo_name: str) -> bool: """ Checks whether the PR needs to be reviewed by a TIM reviewer. It checks the yml file of the integration - if it has the feed: True @@ -291,11 +353,14 @@ Arguments: - pr_files: The list of files changed in the Pull Request - support_label: the support label of the PR - the highest one.
+ - 'external_pr_branch' (str) : name of the external branch to checkout + - 'remote_fork_owner' (str) : name of the remote owner for checkout + - 'repo_name' (str) : name of the external repository Returns: True or false if tim reviewer needed """ if support_label in (XSOAR_SUPPORT_LEVEL_LABEL, PARTNER_SUPPORT_LEVEL_LABEL): - return is_tim_content(pr_files) + return is_tim_content(pr_files, external_pr_branch, remote_fork_owner, repo_name) return False @@ -332,7 +397,7 @@ def find_all_open_prs_by_user(content_repo: Repository, pr_creator: str, pr_numb for pr in all_prs: if pr.number == pr_number: # Exclude current PR continue - existing_pr_author = get_user_from_pr_body(pr) if pr.user.login == "xsoar-bot" else pr.user.login + existing_pr_author = get_user_from_pr_body(pr) if pr.user.login in ["xsoar-bot", "content-bot"] else pr.user.login if existing_pr_author == pr_creator: similar_prs.append(pr) print(f'PR\'s by the same author: {similar_prs}') @@ -343,6 +408,7 @@ def reviewer_of_prs_from_current_round(other_prs_by_same_user: list, content_rev """ Get all PR's that are currently open from the same author, filter the list and return reviewer if reviewer is part of the current contribution round + The check for reviewer is done with assignees because reviewers list after initial review is empty. Arguments: - other_prs_by_same_user - list of opened PR's @@ -351,8 +417,9 @@ def reviewer_of_prs_from_current_round(other_prs_by_same_user: list, content_rev """ content_reviewers_set = set(content_reviewers) for pr in other_prs_by_same_user: - reviewer_names = {reviewer.login for reviewer in pr.requested_reviewers} - existing_reviewer = content_reviewers_set.intersection(reviewer_names) + print(f'the requested assignees are : {pr.assignees}') + assignee_names = {assignee.login for assignee in pr.assignees} + existing_reviewer = content_reviewers_set.intersection(assignee_names) if existing_reviewer: return existing_reviewer.pop() else: @@ -371,7 +438,7 @@ def find_reviewer_to_assign(content_repo: Repository, pr: PullRequest, pr_number Returns: - Reviewer to assign """ - if pr.user.login == "xsoar-bot": + if pr.user.login in ["xsoar-bot", "content-bot"]: pr_creator = get_user_from_pr_body(pr) else: pr_creator = pr.user.login @@ -425,9 +492,9 @@ def main(): pr_files = [file.filename for file in pr.get_files()] print(f'{pr_files=} for {pr_number=}') - + remote_fork_owner = pr.head.repo.full_name.split('/')[0] labels_to_add = [CONTRIBUTION_LABEL, EXTERNAL_LABEL] - if support_label := get_packs_support_level_label(pr_files, pr.head.ref, repo_name): + if support_label := get_packs_support_level_label(pr_files, pr.head.ref, remote_fork_owner, repo_name): labels_to_add.append(support_label) # Add the initial labels to PR: @@ -463,7 +530,12 @@ def main(): # Parse PR reviewers from JSON and assign them # Exit if JSON doesn't exist or not parsable - content_roles = load_json(CONTENT_ROLES_PATH) + content_roles = get_content_roles() + + if not content_roles: + print("Unable to retrieve the content roles. 
Exiting...") + sys.exit(1) + content_reviewers, security_reviewer, tim_reviewer = get_content_reviewers(content_roles) print(f"Content Reviewers: {','.join(content_reviewers)}") @@ -482,7 +554,7 @@ def main(): pr.add_to_labels(SECURITY_LABEL) # adding TIM reviewer - if is_tim_reviewer_needed(pr_files, support_label): + if is_tim_reviewer_needed(pr_files, support_label, pr.head.ref, remote_fork_owner, repo_name): reviewers.append(tim_reviewer) pr.add_to_labels(TIM_LABEL) diff --git a/Utils/github_workflow_scripts/jira_integration_scripts/link_pr_to_jira_issue.py b/.github/github_workflow_scripts/jira_integration_scripts/link_pr_to_jira_issue.py similarity index 100% rename from Utils/github_workflow_scripts/jira_integration_scripts/link_pr_to_jira_issue.py rename to .github/github_workflow_scripts/jira_integration_scripts/link_pr_to_jira_issue.py diff --git a/Utils/github_workflow_scripts/parse_junit_per_pack.py b/.github/github_workflow_scripts/parse_junit_per_pack.py similarity index 100% rename from Utils/github_workflow_scripts/parse_junit_per_pack.py rename to .github/github_workflow_scripts/parse_junit_per_pack.py diff --git a/Utils/github_workflow_scripts/request_contributor_review.py b/.github/github_workflow_scripts/request_contributor_review.py similarity index 100% rename from Utils/github_workflow_scripts/request_contributor_review.py rename to .github/github_workflow_scripts/request_contributor_review.py diff --git a/Utils/github_workflow_scripts/run_secrets_detection.py b/.github/github_workflow_scripts/run_secrets_detection.py similarity index 100% rename from Utils/github_workflow_scripts/run_secrets_detection.py rename to .github/github_workflow_scripts/run_secrets_detection.py diff --git a/Utils/github_workflow_scripts/run_secrets_detection_get_playbook_status.py b/.github/github_workflow_scripts/run_secrets_detection_get_playbook_status.py similarity index 100% rename from Utils/github_workflow_scripts/run_secrets_detection_get_playbook_status.py rename to .github/github_workflow_scripts/run_secrets_detection_get_playbook_status.py diff --git a/Utils/github_workflow_scripts/send_slack_message.py b/.github/github_workflow_scripts/send_slack_message.py similarity index 100% rename from Utils/github_workflow_scripts/send_slack_message.py rename to .github/github_workflow_scripts/send_slack_message.py diff --git a/Utils/github_workflow_scripts/autobump_release_notes/skip_conditions.py b/.github/github_workflow_scripts/skip_conditions.py similarity index 99% rename from Utils/github_workflow_scripts/autobump_release_notes/skip_conditions.py rename to .github/github_workflow_scripts/skip_conditions.py index 5fb6f995a37a..225bfff8183a 100644 --- a/Utils/github_workflow_scripts/autobump_release_notes/skip_conditions.py +++ b/.github/github_workflow_scripts/skip_conditions.py @@ -7,7 +7,7 @@ from github.PullRequest import PullRequest from packaging.version import Version from blessings import Terminal -from Utils.github_workflow_scripts.utils import load_json, Checkout, timestamped_print +from utils import load_json, Checkout, timestamped_print print = timestamped_print diff --git a/Utils/github_workflow_scripts/sync_contrib_base.py b/.github/github_workflow_scripts/sync_contrib_base.py similarity index 100% rename from Utils/github_workflow_scripts/sync_contrib_base.py rename to .github/github_workflow_scripts/sync_contrib_base.py diff --git a/Utils/github_workflow_scripts/utils.py b/.github/github_workflow_scripts/utils.py similarity index 61% rename from 
Utils/github_workflow_scripts/utils.py rename to .github/github_workflow_scripts/utils.py index e6eb3d4d342b..5609bdd17029 100644 --- a/Utils/github_workflow_scripts/utils.py +++ b/.github/github_workflow_scripts/utils.py @@ -3,22 +3,26 @@ import os import sys import json +import git.exc +import requests from datetime import datetime from typing import Any from collections.abc import Generator, Iterable from pathlib import Path from demisto_sdk.commands.common.tools import get_pack_metadata +import git -from git import Repo - -CONTENT_ROOT_PATH = os.path.abspath(os.path.join(__file__, '../../..')) # full path to content root repo -CONTENT_ROLES_PATH = Path(os.path.join(CONTENT_ROOT_PATH, ".github", "content_roles.json")) DOC_REVIEWER_KEY = "DOC_REVIEWER" CONTRIBUTION_REVIEWERS_KEY = "CONTRIBUTION_REVIEWERS" CONTRIBUTION_SECURITY_REVIEWER_KEY = "CONTRIBUTION_SECURITY_REVIEWER" TIM_REVIEWER_KEY = "TIM_REVIEWER" +CONTENT_ROLES_FILENAME = "content_roles.json" +GITHUB_HIDDEN_DIR = ".github" +CONTENT_ROLES_BLOB_MASTER_URL = f"https://raw.githubusercontent.com/demisto/content/master/{GITHUB_HIDDEN_DIR}/{CONTENT_ROLES_FILENAME}" + + # override print so we have a timestamp with each print org_print = print CallArgs = Iterable[tuple[Any] | tuple[Any, dict]] @@ -102,7 +106,7 @@ class Checkout: # pragma: no cover previously current branch. """ - def __init__(self, repo: Repo, branch_to_checkout: str, fork_owner: str | None = None, repo_name: str = 'content'): + def __init__(self, repo: git.Repo, branch_to_checkout: str, fork_owner: str | None = None, repo_name: str = 'content'): """Initializes instance attributes. Arguments: repo: git repo object @@ -120,19 +124,21 @@ def __init__(self, repo: Repo, branch_to_checkout: str, fork_owner: str | None = self.repo.create_remote(name=forked_remote_name, url=url) print(f'Successfully created remote {forked_remote_name} for repo {url}') # noqa: T201 except Exception as error: - print(f'could not create remote from {url}, {error=}') # noqa: T201 - # handle the case where the name of the forked repo is not content - if github_event_path := os.getenv("GITHUB_EVENT_PATH"): - try: - payload = json.loads(github_event_path) - except ValueError: - print('failed to load GITHUB_EVENT_PATH') # noqa: T201 - raise ValueError(f'cannot checkout to the forked branch {branch_to_checkout} of the owner {fork_owner}') - # forked repo name includes fork_owner + repo name, for example foo/content. - forked_repo_name = payload.get("pull_request", {}).get("head", {}).get("repo", {}).get("full_name") - self.repo.create_remote(name=forked_remote_name, url=f"https://github.com/{forked_repo_name}") - else: - raise + if f'{forked_remote_name} already exists' not in str(error): + print(f'could not create remote from {url}, {error=}') # noqa: T201 + # handle the case where the name of the forked repo is not content + if github_event_path := os.getenv("GITHUB_EVENT_PATH"): + try: + payload = json.loads(github_event_path) + except ValueError: + print('failed to load GITHUB_EVENT_PATH') # noqa: T201 + raise ValueError(f'cannot checkout to the forked branch {branch_to_checkout} of the ' + f'owner {fork_owner}') + # forked repo name includes fork_owner + repo name, for example foo/content. 
+ forked_repo_name = payload.get("pull_request", {}).get("head", {}).get("repo", {}).get("full_name") + self.repo.create_remote(name=forked_remote_name, url=f"https://github.com/{forked_repo_name}") + else: + raise forked_remote = self.repo.remote(forked_remote_name) forked_remote.fetch(branch_to_checkout) @@ -248,3 +254,100 @@ def get_doc_reviewer(content_roles: dict[str, Any]) -> str: if not (reviewer := content_roles.get(DOC_REVIEWER_KEY)): raise ValueError("Cannot get doc reviewer") return reviewer + + +def get_content_roles_from_blob() -> dict[str, Any] | None: + """ + Helper method to retrieve the 'content_roles.json` from + the `demisto/content` master blob. + + Returns: + - `dict[str, Any]` representing the content roles. See `.github/content_roles.json` for + the expected structure. If there's any failure getting/reading the resource, + we return `None`. + """ + + roles = None + + try: + response = requests.get(CONTENT_ROLES_BLOB_MASTER_URL) + response.raise_for_status() # Raise an error for bad status codes + print(f"Successfully retrieved {CONTENT_ROLES_FILENAME} from blob") + roles = response.json() + except (requests.RequestException, requests.HTTPError, json.JSONDecodeError, TypeError) as e: + print(f"{e.__class__.__name__} getting {CONTENT_ROLES_FILENAME} from blob: {e}.") + finally: + return roles + + +def get_content_roles(path: Path | None = None) -> dict[str, Any] | None: + """ + Helper method to retrieve the content roles config. + We first attempt to retrieve the content roles from `demisto/content` master blob. + If this attempt fails, we attempt to retrieve it from the filesystem. + + Arguments: + - `path` (``Path | None``): The path used to find the content_roles.json. + Used in case we can't retrieve the file from GitHub. + + Returns: + - `dict[str, Any]` representing the content roles. + """ + + print(f"Attempting to retrieve '{CONTENT_ROLES_FILENAME}' from blob {CONTENT_ROLES_BLOB_MASTER_URL}...") + roles = get_content_roles_from_blob() + + if not roles: + print(f"Unable to retrieve '{CONTENT_ROLES_FILENAME}' from blob. Attempting to retrieve from the filesystem...") + repo_root_path = get_repo_path(str(path)) + content_roles_path = repo_root_path / GITHUB_HIDDEN_DIR / CONTENT_ROLES_FILENAME + roles = load_json(content_roles_path) + + return roles + + +def get_repo_path(path: str = ".") -> Path: + """ + Helper method to get the path of the repo. + + Arguments: + - `path` (``str``): The path to search for the repo. + If nothing is defined we use the current working directory. + + Returns: + - `Path` of the root repo. If the repo doesn't exist or + it's bare, we exit. + """ + + try: + repo = git.Repo(path, search_parent_directories=True) + git_root = repo.working_tree_dir + if git_root: + return Path(git_root) + else: + raise ValueError + except (git.exc.InvalidGitRepositoryError, ValueError): + print("Unable to get repo root path. Terminating...") + sys.exit(1) + + +def get_metadata(pack_dirs: set[str]) -> list[dict]: + """ + Get the pack metadata. 
+ + Args: + pack_dirs (set): paths to the packs that were changed + + Returns: + - a list of pack metadata dictionaries + """ + pack_metadata_list = [] + + for pack_dir in pack_dirs: + if pack_metadata := get_pack_metadata(pack_dir): + print(f"pack metadata was retrieved for pack {pack_dir}") # noqa: T201 + pack_metadata_list.append(pack_metadata) + else: + print(f'Could not find pack metadata for pack {pack_dir}') # noqa: T201 + + return pack_metadata_list diff --git a/Utils/github_workflow_scripts/utils_test.py b/.github/github_workflow_scripts/utils_test.py similarity index 64% rename from Utils/github_workflow_scripts/utils_test.py rename to .github/github_workflow_scripts/utils_test.py index a1bd4cdc7e28..86f5a0fc331b 100644 --- a/Utils/github_workflow_scripts/utils_test.py +++ b/.github/github_workflow_scripts/utils_test.py @@ -1,5 +1,8 @@ #!/usr/bin/env python3 +import json +from pathlib import Path import pytest +import requests_mock from typing import Any from utils import ( get_env_var, @@ -9,8 +12,13 @@ CONTRIBUTION_SECURITY_REVIEWER_KEY, TIM_REVIEWER_KEY, DOC_REVIEWER_KEY, - get_doc_reviewer + get_doc_reviewer, + CONTENT_ROLES_BLOB_MASTER_URL, + get_content_roles, + CONTENT_ROLES_FILENAME, + GITHUB_HIDDEN_DIR ) +from git import Repo class TestGetEnvVar: @@ -293,3 +301,156 @@ def test_exit_get_doc_reviewer( with pytest.raises(ValueError) as e: get_doc_reviewer(content_roles) assert e.type == ValueError + + +class TestGetContentRoles: + + content_roles: dict[str, Any] = { + CONTRIBUTION_REVIEWERS_KEY: ['prr1', 'prr2', 'prr3'], + 'CONTRIBUTION_TL': 'tl1', + CONTRIBUTION_SECURITY_REVIEWER_KEY: 'sr1', + 'ON_CALL_DEVS': ['ocd1', 'ocd2'], + DOC_REVIEWER_KEY: 'dr1', + TIM_REVIEWER_KEY: 'tr1' + } + + def test_get_content_roles_success( + self, + requests_mock: requests_mock.Mocker + ): + """ + Test successful retrieval of content_roles.json. + + Given: + - A content_roles.json + + When: + - The request to retrieve content_roles.json is successful. + + Then: + - The response includes the expected content role keys. + """ + + requests_mock.get( + CONTENT_ROLES_BLOB_MASTER_URL, + json=self.content_roles + ) + + actual_content_roles = get_content_roles() + assert actual_content_roles + assert CONTRIBUTION_REVIEWERS_KEY in actual_content_roles + assert CONTRIBUTION_SECURITY_REVIEWER_KEY in actual_content_roles + assert TIM_REVIEWER_KEY in actual_content_roles + + def test_get_content_roles_fail_blob( + self, + requests_mock: requests_mock.Mocker, + tmp_path: Path + ): + """ + Test failure to retrieve the content_roles.json blob + and successful retrieval from the filesystem. + + Given: + - A content_roles.json + + When: + - The request to retrieve content_roles.json fails. + + Then: + - get_content_roles returns a populated dict.
+ """ + + # Mock failed request + requests_mock.get( + CONTENT_ROLES_BLOB_MASTER_URL, + status_code=404 + ) + + # Create repo and content_roles.json in fs + Repo.init(tmp_path) + (tmp_path / GITHUB_HIDDEN_DIR).mkdir() + content_roles_path = tmp_path / GITHUB_HIDDEN_DIR / CONTENT_ROLES_FILENAME + content_roles_path.touch() + content_roles_path.write_text(json.dumps(self.content_roles, indent=4)) + + actual_content_roles = get_content_roles(tmp_path) + + assert actual_content_roles + assert CONTRIBUTION_REVIEWERS_KEY in actual_content_roles + assert CONTRIBUTION_SECURITY_REVIEWER_KEY in actual_content_roles + assert TIM_REVIEWER_KEY in actual_content_roles + + def test_get_content_roles_invalid_json_blob( + self, + requests_mock: requests_mock.Mocker, + tmp_path: Path + ): + """ + Test failure to retrieve content_roles.json + and successful retrieval from the filesystem. + + Given: + - A content_roles.json + + When: + - The content_roles.json is invalid. + + Then: + - get_content_roles returns a populated dict. + """ + + requests_mock.get( + CONTENT_ROLES_BLOB_MASTER_URL, + json={"only_key"} + ) + + # Create repo and content_roles.json in fs + Repo.init(tmp_path) + (tmp_path / GITHUB_HIDDEN_DIR).mkdir() + content_roles_path = tmp_path / GITHUB_HIDDEN_DIR / CONTENT_ROLES_FILENAME + content_roles_path.touch() + content_roles_path.write_text(json.dumps(self.content_roles, indent=4)) + + actual_content_roles = get_content_roles(tmp_path) + + assert actual_content_roles + assert CONTRIBUTION_REVIEWERS_KEY in actual_content_roles + assert CONTRIBUTION_SECURITY_REVIEWER_KEY in actual_content_roles + assert TIM_REVIEWER_KEY in actual_content_roles + + def test_get_content_roles_invalid_json_blob_and_fs( + self, + requests_mock: requests_mock.Mocker, + tmp_path: Path + ): + """ + Test failure to retrieve content_roles.json + from the blob and from the filesystem. + + Given: + - A content_roles.json + + When: + - The content_roles.json is invalid in blob. + - The content_roles.json is invalid in filesystem. + + Then: + - get_content_roles returns nothing. + """ + + requests_mock.get( + CONTENT_ROLES_BLOB_MASTER_URL, + json={"only_key"} + ) + + # Create repo and content_roles.json in fs + Repo.init(tmp_path) + (tmp_path / GITHUB_HIDDEN_DIR).mkdir() + content_roles_path = tmp_path / GITHUB_HIDDEN_DIR / CONTENT_ROLES_FILENAME + content_roles_path.touch() + content_roles_path.write_text("{\"only_key\"}") + + actual_content_roles = get_content_roles(tmp_path) + + assert not actual_content_roles diff --git a/.github/workflows/autobump_rn.yml b/.github/workflows/autobump_rn.yml index 276b52491e41..22e8eb7cdcfb 100644 --- a/.github/workflows/autobump_rn.yml +++ b/.github/workflows/autobump_rn.yml @@ -45,4 +45,4 @@ jobs: git config --global user.email "bot@demisto.com" git config --global user.name "Content Bot" echo "Someone merged to master. Starting to check if conflicts in release notes created." 
- poetry run python Utils/github_workflow_scripts/autobump_release_notes/autobump_rn.py -g $CONTENTBOT_GH_ADMIN_TOKEN -r $RUN_ID + poetry run python .github/github_workflow_scripts/autobump_rn.py -g $CONTENTBOT_GH_ADMIN_TOKEN -r $RUN_ID diff --git a/.github/workflows/check-contribution-form-filled.yml b/.github/workflows/check-contribution-form-filled.yml index 6ba73972e403..6e90a7a7830d 100644 --- a/.github/workflows/check-contribution-form-filled.yml +++ b/.github/workflows/check-contribution-form-filled.yml @@ -29,5 +29,5 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | echo "Checking if contribution form needs to be filled for PR: $PR_NUMBER" - cd Utils/github_workflow_scripts + cd .github/github_workflow_scripts poetry run ./check_if_needs_to_fill_contribution_form.py --pr_number $PR_NUMBER --github_token $GITHUB_TOKEN diff --git a/.github/workflows/check-contributor-pack.yml b/.github/workflows/check-contributor-pack.yml index 02b6450e0eb0..063af725729c 100644 --- a/.github/workflows/check-contributor-pack.yml +++ b/.github/workflows/check-contributor-pack.yml @@ -34,5 +34,5 @@ jobs: echo "PR number is: $PR_NUMBER" echo "Target branch name is: $BRANCH_NAME" echo "Starting check of contributor packs" - poetry run python ./Utils/github_workflow_scripts/request_contributor_review.py --pr_number $PR_NUMBER --github_token $GITHUB_TOKEN --email_api_token $SENDGRID_EMAIL_API_KEY + poetry run python ./.github/github_workflow_scripts/request_contributor_review.py --pr_number $PR_NUMBER --github_token $GITHUB_TOKEN --email_api_token $SENDGRID_EMAIL_API_KEY echo "Finished check of contributor packs" diff --git a/.github/workflows/check-nightly-ok-label.yml b/.github/workflows/check-nightly-ok-label.yml deleted file mode 100644 index 6dc10a80a336..000000000000 --- a/.github/workflows/check-nightly-ok-label.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Check nightly-ok label - -on: - pull_request: - types: [opened, synchronize, labeled, unlabeled] - -jobs: - check_label: - runs-on: ubuntu-latest - if: github.repository == 'demisto/content' && github.event.pull_request.head.repo.fork == false - - steps: - - name: Checkout repo - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Check if files under .gitlab directory are changed - id: check-changes - run: | - CHANGED_FILES=$(git diff --name-only origin/master...origin/${{ github.head_ref || github.ref_name }}) - echo "All changed files:" - echo "${CHANGED_FILES}" - GITLAB_CHANGED_FILES=$( [[ $CHANGED_FILES == *".gitlab/ci"* ]] && echo true || echo false) - echo "Files in the .gitlab folder have changed: ${GITLAB_CHANGED_FILES}" - echo "gitlab_changed_files=$GITLAB_CHANGED_FILES" >> $GITHUB_OUTPUT - if [[ $GITLAB_CHANGED_FILES == true ]]; then - echo 'Files under .gitlab folder has changed, Will check if the PR has the `nightly-ok` label.' - else - echo 'Files in the .gitlab folder have not been changed.' - fi - - - name: Check if PR has the nightly-ok label - uses: actions/github-script@v7 - id: check-label - with: - script: | - const gitlabChangedFiles = ${{ steps.check-changes.outputs.gitlab_changed_files }}; - if(gitlabChangedFiles) { - console.log('Files under .gitlab folder has changed, Will check if the PR has the `nightly-ok` label.'); - const labels = context.payload.pull_request.labels.map(label => label.name); - const hasLabel = labels.includes('nightly-ok'); - if (hasLabel) { - console.log('All good, the PR has the `nightly-ok` label.'); - } else { - console.log('PR does not have the `nightly-ok` label. 
It is required when changing files under the `.gitlab` directory. Please run nightly using the Utils/gitlab_triggers/trigger_content_nightly_build.sh script, check that succeeded, and add the `nightly-ok` label'); - process.exit(1); // Exit with failure status if label is missing - } - } else { - console.log('Files in the .gitlab folder have not been changed.'); - } diff --git a/.github/workflows/check-partner-approved-label.yml b/.github/workflows/check-partner-approved-label.yml index 1a6ad62254fe..3288a09e866b 100644 --- a/.github/workflows/check-partner-approved-label.yml +++ b/.github/workflows/check-partner-approved-label.yml @@ -33,5 +33,5 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | echo "Checking label Partner-Approved for: $PR_NUMBER" - cd Utils/github_workflow_scripts + cd .github/github_workflow_scripts poetry run python check_if_partner_approved_label_exists.py --pr_number $PR_NUMBER --github_token $GITHUB_TOKEN diff --git a/.github/workflows/clean_stale_branches.yml b/.github/workflows/clean_stale_branches.yml index c07ffc962d16..c49ee1c948be 100644 --- a/.github/workflows/clean_stale_branches.yml +++ b/.github/workflows/clean_stale_branches.yml @@ -6,6 +6,7 @@ on: env: DAY_BEFORE_STALE: 30 DAY_BEFORE_CLOSE: 15 + OPERATION_PER_RUN: 1000 EXEMPT_LABELS: "Ignore Stale,External PR" jobs: @@ -23,7 +24,8 @@ jobs: days-before-issue-close: -1 days-before-pr-stale: ${{env.DAY_BEFORE_STALE}} days-before-pr-close: ${{env.DAY_BEFORE_CLOSE}} + operations-per-run: ${{env.OPERATION_PER_RUN}} stale-pr-message: "This PR is marked as 'Stale' because it has been open for ${{env.DAY_BEFORE_STALE}} days with no activity, it will be automatically closed in ${{env.DAY_BEFORE_CLOSE}} days if no activity will be done. To reset the counter just remove the 'Stale' label or make changes to update this PR. 
If you wish this PR will never be marked as 'Stale' add the 'Ignore Stale'" delete-branch: true remove-pr-stale-when-updated: true - exempt-pr-labels: ${{env.EXEMPT_LABELS}} \ No newline at end of file + exempt-pr-labels: ${{env.EXEMPT_LABELS}} diff --git a/.github/workflows/close_jira_issue_by_pr_merge.yml b/.github/workflows/close_jira_issue_by_pr_merge.yml index c5e7e5924615..b3d2a06c026e 100644 --- a/.github/workflows/close_jira_issue_by_pr_merge.yml +++ b/.github/workflows/close_jira_issue_by_pr_merge.yml @@ -37,5 +37,5 @@ jobs: INSTANCE_URL: ${{ secrets.GOLD_SERVER_URL_NG }} run: | echo "Checking for related Jira issues to PR: $PR_NUMBER" - cd Utils/github_workflow_scripts/jira_integration_scripts + cd .github/github_workflow_scripts/jira_integration_scripts poetry run python ./link_pr_to_jira_issue.py --url "$INSTANCE_URL" --pr_num "$PR_NUMBER" --pr_link "$PR_LINK" --pr_title "$PR_TITLE" --pr_body "$PR_BODY" --is_merged --username $USERNAME --password $PASSWORD diff --git a/.github/workflows/create-internal-pr-from-external.yml b/.github/workflows/create-internal-pr-from-external.yml index dfc8f072d0ea..d36744e91a3e 100644 --- a/.github/workflows/create-internal-pr-from-external.yml +++ b/.github/workflows/create-internal-pr-from-external.yml @@ -39,7 +39,7 @@ jobs: EVENT_PAYLOAD: ${{ toJson(github.event) }} run: | echo "Creating an internal PR from original merged external PR ${{ github.event.pull_request.html_url }}" - cd Utils/github_workflow_scripts + cd .github/github_workflow_scripts poetry run ./create_internal_pr.py echo "Finished Creating Internal PR" diff --git a/.github/workflows/handle-new-external-pr.yml b/.github/workflows/handle-new-external-pr.yml index a24b1cc039ca..9d7b2d0f5ae7 100644 --- a/.github/workflows/handle-new-external-pr.yml +++ b/.github/workflows/handle-new-external-pr.yml @@ -40,7 +40,7 @@ jobs: EVENT_PAYLOAD: ${{ toJson(github.event) }} run: | echo "Updating External PR ${{ github.event.pull_request.html_url }}" - cd Utils/github_workflow_scripts + cd .github/github_workflow_scripts poetry run ./handle_external_pr.py echo "Finished Handling External PR" @@ -54,11 +54,12 @@ jobs: LABEL: "Contribution Form Filled" - name: Send Notification + if: startsWith(github.event.pull_request.title, 'test') != true env: CONTENTBOT_GH_ADMIN_TOKEN: ${{ secrets.CONTENTBOT_GH_ADMIN_TOKEN }} EVENT_PAYLOAD: ${{ toJson(github.event) }} CORTEX_XSOAR_SLACK_TOKEN: ${{ secrets.CORTEX_XSOAR_SLACK_TOKEN }} run: | echo "Sending notification about External PR ${{ github.event.pull_request.html_url }}" - cd Utils/github_workflow_scripts + cd .github/github_workflow_scripts poetry run ./send_slack_message.py diff --git a/.github/workflows/link_edited_pr_to_jira_issue.yml b/.github/workflows/link_edited_pr_to_jira_issue.yml index 9f0e293bdfc8..945b9e8f8b5a 100644 --- a/.github/workflows/link_edited_pr_to_jira_issue.yml +++ b/.github/workflows/link_edited_pr_to_jira_issue.yml @@ -37,6 +37,6 @@ jobs: INSTANCE_URL: ${{ secrets.GOLD_SERVER_URL_NG }} run: | echo "Checking for related Jira issues to PR: $PR_NUMBER" - cd Utils/github_workflow_scripts/jira_integration_scripts + cd .github/github_workflow_scripts/jira_integration_scripts echo --pr_num $PR_NUMBER --pr_link $PR_LINK --pr_title $PR_TITLE --pr_body $PR_BODY --no-is_merged poetry run python ./link_pr_to_jira_issue.py --url "$INSTANCE_URL" --pr_num "$PR_NUMBER" --pr_link "$PR_LINK" --pr_title "$PR_TITLE" --pr_body "$PR_BODY" --no-is_merged --username $USERNAME --password $PASSWORD diff --git 
a/.github/workflows/pre-commit-reuse.yml b/.github/workflows/pre-commit-reuse.yml index b3eeec0d8b55..d4f54be0bc98 100644 --- a/.github/workflows/pre-commit-reuse.yml +++ b/.github/workflows/pre-commit-reuse.yml @@ -68,7 +68,7 @@ jobs: files: ".report_pytest.xml" - name: Create pack-wise pytest report - run: poetry run python Utils/github_workflow_scripts/parse_junit_per_pack.py + run: poetry run python .github/github_workflow_scripts/parse_junit_per_pack.py if: | always() && steps.check-pytest-junit-exists.outputs.files_exists == 'true' && diff --git a/.github/workflows/protect-files.yml b/.github/workflows/protect-files.yml new file mode 100644 index 000000000000..f2f2d1b6a983 --- /dev/null +++ b/.github/workflows/protect-files.yml @@ -0,0 +1,38 @@ +name: Protect Infrastructure Files + +on: + pull_request: + types: + - opened + - synchronize + +jobs: + protect-infra-files: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 100 # to speed up. changed-files will fetch more if necessary + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.x" + + - name: Get changed files + id: changed-files + uses: tj-actions/changed-files@v44 # disable-secrets-detection + with: + files: | + .gitlab/ci/.gitlab-ci.yml + + - name: Prevent changing protected files + if: steps.changed-files.outputs.any_changed == 'true' + env: + FILES: ${{ steps.changed-files.outputs.all_changed_files }} + run: | + for file in ${FILES}; do + echo "::error file=$file,line=1,endLine=1,title=Protected file modified::This file should not be changed in master.%0AIf you are sure it's necessary, ask for a force merge and explain the rationale." + done + exit 1 diff --git a/.github/workflows/protect-infra-directories.yml b/.github/workflows/protect-infra-directories-contributions.yml similarity index 75% rename from .github/workflows/protect-infra-directories.yml rename to .github/workflows/protect-infra-directories-contributions.yml index 346e8920a034..a19f2e0e22d4 100644 --- a/.github/workflows/protect-infra-directories.yml +++ b/.github/workflows/protect-infra-directories-contributions.yml @@ -1,4 +1,4 @@ -name: Protect Infrastructure Directories +name: Protect Infrastructure Directories (Contributions) on: pull_request: @@ -7,7 +7,7 @@ on: - synchronize jobs: - check_changes: + protect-non-packs: runs-on: ubuntu-latest if: 'startsWith(github.head_ref, ''contrib'') || (github.event.pull_request.head.repo.fork == true && contains(github.event.pull_request.base.ref, ''contrib''))' steps: @@ -25,4 +25,4 @@ jobs: - name: Check for changes in protected directories run: | - python Utils/github_workflow_scripts/check_protected_directories.py ${{ steps.changed-files.outputs.all_changed_files }} + python .github/github_workflow_scripts/check_protected_directories.py ${{ steps.changed-files.outputs.all_changed_files }} diff --git a/.github/workflows/run-secrets-detection.yml b/.github/workflows/run-secrets-detection.yml index 6f701a874c2b..5c11b22bef10 100644 --- a/.github/workflows/run-secrets-detection.yml +++ b/.github/workflows/run-secrets-detection.yml @@ -26,7 +26,7 @@ jobs: GOLD_SERVER_URL: ${{ secrets.GOLD_SERVER_URL_NG }} run: | echo "Run secrets detection for PR: $PR_NUMBER on branch: $BRANCH_NAME" - investigation_id=$(poetry run Utils/github_workflow_scripts/run_secrets_detection.py --pr_number $PR_NUMBER --branch_name $BRANCH_NAME --username $USERNAME --password $PASSWORD --gold_server_url $GOLD_SERVER_URL) + investigation_id=$(poetry 
run .github/github_workflow_scripts/run_secrets_detection.py --pr_number $PR_NUMBER --branch_name $BRANCH_NAME --username $USERNAME --password $PASSWORD --gold_server_url $GOLD_SERVER_URL) echo "INVESTIGATION_ID=$investigation_id" >> $GITHUB_ENV - name: Wait For Playbook To Finish env: @@ -35,4 +35,4 @@ jobs: GOLD_SERVER_URL: ${{ secrets.GOLD_SERVER_URL_NG_RESULT }} run: | echo "Invastigation id is: $INVESTIGATION_ID " - poetry run python ./Utils/github_workflow_scripts/run_secrets_detection_get_playbook_status.py -i $INVESTIGATION_ID -k $GOLD_API_KEY -ai $AUTH_ID --gold_server_url $GOLD_SERVER_URL + poetry run python ./.github/github_workflow_scripts/run_secrets_detection_get_playbook_status.py -i $INVESTIGATION_ID -k $GOLD_API_KEY -ai $AUTH_ID --gold_server_url $GOLD_SERVER_URL diff --git a/.github/workflows/sync-contribution-base-branch-on-change.yml b/.github/workflows/sync-contribution-base-branch-on-change.yml index 58052044d8b8..33f9c559c5a2 100644 --- a/.github/workflows/sync-contribution-base-branch-on-change.yml +++ b/.github/workflows/sync-contribution-base-branch-on-change.yml @@ -31,6 +31,6 @@ jobs: CONTENTBOT_GH_ADMIN_TOKEN: ${{ secrets.CONTENTBOT_GH_ADMIN_TOKEN }} run: | echo "Updating contribution base branch (contrib/*)" - cd Utils/github_workflow_scripts + cd .github/github_workflow_scripts poetry run ./sync_contrib_base.py --branch_name ${{ github.event.pull_request.base.ref }} echo "Finished updating base branch" diff --git a/.github/workflows/sync-contribution-base-branch.yml b/.github/workflows/sync-contribution-base-branch.yml index f0912f0a940b..a31132326a79 100644 --- a/.github/workflows/sync-contribution-base-branch.yml +++ b/.github/workflows/sync-contribution-base-branch.yml @@ -28,6 +28,6 @@ jobs: CONTENTBOT_GH_ADMIN_TOKEN: ${{ secrets.CONTENTBOT_GH_ADMIN_TOKEN }} run: | echo "Updating contribution base branches (contrib/*)" - cd Utils/github_workflow_scripts + cd .github/github_workflow_scripts poetry run ./sync_contrib_base.py echo "Finished updating base branches" diff --git a/.gitignore b/.gitignore index 8553f5cf0c14..af6b2ce557a7 100644 --- a/.gitignore +++ b/.gitignore @@ -21,8 +21,8 @@ CommonServerUserPython.py CommonServerPowerShell.ps1 demistomock.py demistomock.ps1 -!Tests/demistomock/demistomock.py -!Tests/demistomock/demistomock.ps1 +Tests/demistomock/demistomock.py +Tests/demistomock/demistomock.ps1 Tests/filter_file.txt Tests/filter_file_old.txt Tests/id_set.json diff --git a/.gitlab/ci/.gitlab-ci.yml b/.gitlab/ci/.gitlab-ci.yml index e555781ee8b1..70bb9f73f58f 100644 --- a/.gitlab/ci/.gitlab-ci.yml +++ b/.gitlab/ci/.gitlab-ci.yml @@ -5,3 +5,12 @@ include: - file: "/.gitlab/ci/content-ci/ci/.gitlab-ci.yml" ref: $INFRA_BRANCH project: "${CI_PROJECT_NAMESPACE}/infra" + +default: + image: ${DOCKER_IO}/devdemisto/gitlab-content-ci:1.0.0.64455 + artifacts: + expire_in: 30 days + paths: + - ${CI_PROJECT_DIR}/artifacts/* + - ${CI_PROJECT_DIR}/pipeline_jobs_folder/* + when: always \ No newline at end of file diff --git a/.pre-commit-config_template.yaml b/.pre-commit-config_template.yaml index 96a2255b64d6..82fc887937ce 100644 --- a/.pre-commit-config_template.yaml +++ b/.pre-commit-config_template.yaml @@ -42,14 +42,15 @@ repos: args:ci: - --config=nightly_ruff.toml skip:docker_autoupdate: true -- repo: https://github.com/pre-commit/mirrors-autopep8 - rev: v2.0.4 +- repo: https://github.com/hhatto/autopep8 + rev: v2.3.1 hooks: - id: autopep8 exclude_support_level:ci: - community exclude_support_level:nightly: - community + skip:nightly: true - repo: 
https://github.com/pre-commit/mirrors-mypy rev: v0.982 hooks: @@ -61,7 +62,7 @@ repos: - --show-error-codes - --follow-imports=silent - --allow-redefinition - exclude: test_data|tests_data|.venv|.*_test.py$|infrastructure_tests|.vulture_whitelist.py|demistomock.py|Templates|conftest.py + exclude: test_data|tests_data|.venv|.*_test.py$|infrastructure_tests|.vulture_whitelist.py|demistomock.py|Templates|conftest.py|Utils/download_packs_and_docker_images.py language: system entry: mypy @@ -199,7 +200,6 @@ repos: require_serial: true args: - validate - - --skip-depth-one-file - --skip-depth-one-folder - --skip-integration-script-file-name - --skip-markdown diff --git a/Packs/AHA/Author_image.png b/Packs/AHA/Author_image.png deleted file mode 100644 index 24759893f4b4..000000000000 Binary files a/Packs/AHA/Author_image.png and /dev/null differ diff --git a/Packs/ANYRUN/doc_files/Detonate_File_ANYRUN.png b/Packs/ANYRUN/doc_files/Detonate_File_ANYRUN.png new file mode 100644 index 000000000000..4e067c00b5b2 Binary files /dev/null and b/Packs/ANYRUN/doc_files/Detonate_File_ANYRUN.png differ diff --git a/Packs/ANYRUN/doc_files/Detonate_File_From_URL_ANYRUN.png b/Packs/ANYRUN/doc_files/Detonate_File_From_URL_ANYRUN.png new file mode 100644 index 000000000000..270415b0151e Binary files /dev/null and b/Packs/ANYRUN/doc_files/Detonate_File_From_URL_ANYRUN.png differ diff --git a/Packs/ANYRUN/doc_files/Detonate_URL_ANYRUN.png b/Packs/ANYRUN/doc_files/Detonate_URL_ANYRUN.png new file mode 100644 index 000000000000..864c7bc09ff6 Binary files /dev/null and b/Packs/ANYRUN/doc_files/Detonate_URL_ANYRUN.png differ diff --git a/Packs/AWS-ACM/.pack-ignore b/Packs/AWS-ACM/.pack-ignore index af5a63b37225..7d2c300ca049 100644 --- a/Packs/AWS-ACM/.pack-ignore +++ b/Packs/AWS-ACM/.pack-ignore @@ -1,3 +1,3 @@ [file:AWS-ACM.yml] -ignore=BA108,BA109,IN124,BA124 +ignore=BA108,BA109,BA124 diff --git a/Packs/AWS-ACM/Integrations/AWS-ACM/AWS-ACM.yml b/Packs/AWS-ACM/Integrations/AWS-ACM/AWS-ACM.yml index 8a790e85ebf0..3d2cc82e0004 100644 --- a/Packs/AWS-ACM/Integrations/AWS-ACM/AWS-ACM.yml +++ b/Packs/AWS-ACM/Integrations/AWS-ACM/AWS-ACM.yml @@ -469,7 +469,7 @@ script: description: The certificate chain that contains the root certificate issued by the certificate authority (CA). type: string description: Retrieves a certificate specified by an ARN and its certificate chain . The chain is an ordered list of certificates that contains the end entity certificate, intermediate certificates of subordinate CAs, and the root certificate in that order. The certificate and certificate chain are base64 encoded. If you want to decode the certificate to see the individual fields, you can use OpenSSL. - dockerimage: demisto/boto3py3:1.0.0.87582 + dockerimage: demisto/boto3py3:1.0.0.100496 subtype: python3 tests: - ACM-Test diff --git a/Packs/AWS-ACM/ReleaseNotes/1_1_36.md b/Packs/AWS-ACM/ReleaseNotes/1_1_36.md new file mode 100644 index 000000000000..7139d3650a8a --- /dev/null +++ b/Packs/AWS-ACM/ReleaseNotes/1_1_36.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - ACM + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100496*. 
diff --git a/Packs/AWS-ACM/pack_metadata.json b/Packs/AWS-ACM/pack_metadata.json index 843c0c686f32..117de8829811 100644 --- a/Packs/AWS-ACM/pack_metadata.json +++ b/Packs/AWS-ACM/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - ACM", "description": "Amazon Web Services Certificate Manager Service (acm)", "support": "xsoar", - "currentVersion": "1.1.35", + "currentVersion": "1.1.36", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-AccessAnalyzer/.pack-ignore b/Packs/AWS-AccessAnalyzer/.pack-ignore index 74a0666b4a0d..b2aac4de6f0b 100644 --- a/Packs/AWS-AccessAnalyzer/.pack-ignore +++ b/Packs/AWS-AccessAnalyzer/.pack-ignore @@ -1,3 +1,3 @@ [file:AWS-AccessAnalyzer.yml] -ignore=BA108,BA109,IN126,IN124 +ignore=BA108,BA109,IN126 diff --git a/Packs/AWS-AccessAnalyzer/Integrations/AWS-AccessAnalyzer/AWS-AccessAnalyzer.yml b/Packs/AWS-AccessAnalyzer/Integrations/AWS-AccessAnalyzer/AWS-AccessAnalyzer.yml index 6727d47dd6b5..94c49dc36ee5 100755 --- a/Packs/AWS-AccessAnalyzer/Integrations/AWS-AccessAnalyzer/AWS-AccessAnalyzer.yml +++ b/Packs/AWS-AccessAnalyzer/Integrations/AWS-AccessAnalyzer/AWS-AccessAnalyzer.yml @@ -310,7 +310,7 @@ script: name: roleSessionDuration description: Updates findings with the new values provided in the request. name: aws-access-analyzer-update-findings - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.100468 isfetch: true runonce: false script: '-' diff --git a/Packs/AWS-AccessAnalyzer/ReleaseNotes/1_1_32.md b/Packs/AWS-AccessAnalyzer/ReleaseNotes/1_1_32.md new file mode 100644 index 000000000000..cea94f578498 --- /dev/null +++ b/Packs/AWS-AccessAnalyzer/ReleaseNotes/1_1_32.md @@ -0,0 +1,3 @@ +#### Integrations +##### AWS - AccessAnalyzer +- Updated the Docker image to: *demisto/boto3py3:1.0.0.98661*. diff --git a/Packs/AWS-AccessAnalyzer/ReleaseNotes/1_1_33.md b/Packs/AWS-AccessAnalyzer/ReleaseNotes/1_1_33.md new file mode 100644 index 000000000000..9f11d55bf318 --- /dev/null +++ b/Packs/AWS-AccessAnalyzer/ReleaseNotes/1_1_33.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - AccessAnalyzer + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. 
diff --git a/Packs/AWS-AccessAnalyzer/pack_metadata.json b/Packs/AWS-AccessAnalyzer/pack_metadata.json index 331b742c7d46..52abbe0c401f 100644 --- a/Packs/AWS-AccessAnalyzer/pack_metadata.json +++ b/Packs/AWS-AccessAnalyzer/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - AccessAnalyzer", "description": "Amazon Web Services IAM Access Analyzer", "support": "xsoar", - "currentVersion": "1.1.31", + "currentVersion": "1.1.33", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-CloudTrail/.pack-ignore b/Packs/AWS-CloudTrail/.pack-ignore index 77fd75cda556..e71a1fb64604 100644 --- a/Packs/AWS-CloudTrail/.pack-ignore +++ b/Packs/AWS-CloudTrail/.pack-ignore @@ -1,5 +1,5 @@ [file:AWS-CloudTrail.yml] -ignore=BA108,BA109,IN124,BA124 +ignore=BA108,BA109,BA124 [known_words] cloudtrail diff --git a/Packs/AWS-CloudTrail/Integrations/AWS-CloudTrail/AWS-CloudTrail.yml b/Packs/AWS-CloudTrail/Integrations/AWS-CloudTrail/AWS-CloudTrail.yml index 54f66b64d1e7..c9ca20c25224 100644 --- a/Packs/AWS-CloudTrail/Integrations/AWS-CloudTrail/AWS-CloudTrail.yml +++ b/Packs/AWS-CloudTrail/Integrations/AWS-CloudTrail/AWS-CloudTrail.yml @@ -431,7 +431,7 @@ script: - contextPath: AWS.CloudTrail.TrailStatus.LatestDigestDeliveryError description: Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. type: string - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.100468 runonce: false script: '' subtype: python3 diff --git a/Packs/AWS-CloudTrail/ReleaseNotes/1_1_3.md b/Packs/AWS-CloudTrail/ReleaseNotes/1_1_3.md new file mode 100644 index 000000000000..b0e8a39b8aac --- /dev/null +++ b/Packs/AWS-CloudTrail/ReleaseNotes/1_1_3.md @@ -0,0 +1,3 @@ +#### Integrations +##### AWS - CloudTrail +- Updated the Docker image to: *demisto/boto3py3:1.0.0.98661*. diff --git a/Packs/AWS-CloudTrail/ReleaseNotes/1_1_4.md b/Packs/AWS-CloudTrail/ReleaseNotes/1_1_4.md new file mode 100644 index 000000000000..5deb2d3e9e4f --- /dev/null +++ b/Packs/AWS-CloudTrail/ReleaseNotes/1_1_4.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - CloudTrail + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. diff --git a/Packs/AWS-CloudTrail/pack_metadata.json b/Packs/AWS-CloudTrail/pack_metadata.json index 8dac5906e7c4..03ac2ad494b0 100644 --- a/Packs/AWS-CloudTrail/pack_metadata.json +++ b/Packs/AWS-CloudTrail/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - CloudTrail", "description": "Amazon Web Services CloudTrail.", "support": "xsoar", - "currentVersion": "1.1.2", + "currentVersion": "1.1.4", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-CloudWatchLogs/.pack-ignore b/Packs/AWS-CloudWatchLogs/.pack-ignore index b020a5421cbd..d4398d1044b3 100644 --- a/Packs/AWS-CloudWatchLogs/.pack-ignore +++ b/Packs/AWS-CloudWatchLogs/.pack-ignore @@ -1,3 +1,3 @@ [file:AWS-CloudWatchLogs.yml] -ignore=BA108,BA109,IN124,BA124 +ignore=BA108,BA109,BA124 diff --git a/Packs/AWS-CloudWatchLogs/Integrations/AWS-CloudWatchLogs/AWS-CloudWatchLogs.yml b/Packs/AWS-CloudWatchLogs/Integrations/AWS-CloudWatchLogs/AWS-CloudWatchLogs.yml index c5d81873c567..1584b2bffb10 100644 --- a/Packs/AWS-CloudWatchLogs/Integrations/AWS-CloudWatchLogs/AWS-CloudWatchLogs.yml +++ b/Packs/AWS-CloudWatchLogs/Integrations/AWS-CloudWatchLogs/AWS-CloudWatchLogs.yml @@ -461,7 +461,7 @@ script: description: The name of the log group. 
type: string description: Lists the specified metric filters. You can list all the metric filters or filter the results by log name, prefix, metric name, or metric namespace. - dockerimage: demisto/boto3py3:1.0.0.88114 + dockerimage: demisto/boto3py3:1.0.0.100468 tests: - No Tests fromversion: 5.0.0 diff --git a/Packs/AWS-CloudWatchLogs/ReleaseNotes/1_2_22.md b/Packs/AWS-CloudWatchLogs/ReleaseNotes/1_2_22.md new file mode 100644 index 000000000000..af27e046e7b3 --- /dev/null +++ b/Packs/AWS-CloudWatchLogs/ReleaseNotes/1_2_22.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - CloudWatchLogs + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. diff --git a/Packs/AWS-CloudWatchLogs/pack_metadata.json b/Packs/AWS-CloudWatchLogs/pack_metadata.json index 0dbca84e37fa..878139acb69c 100644 --- a/Packs/AWS-CloudWatchLogs/pack_metadata.json +++ b/Packs/AWS-CloudWatchLogs/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - CloudWatchLogs", "description": "Amazon Web Services CloudWatch Logs (logs).", "support": "xsoar", - "currentVersion": "1.2.21", + "currentVersion": "1.2.22", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", @@ -18,4 +18,4 @@ "marketplacev2", "xpanse" ] -} +} \ No newline at end of file diff --git a/Packs/AWS-EC2/.pack-ignore b/Packs/AWS-EC2/.pack-ignore index b13fe63bde41..d64f049990bf 100644 --- a/Packs/AWS-EC2/.pack-ignore +++ b/Packs/AWS-EC2/.pack-ignore @@ -1,5 +1,5 @@ [file:AWS-EC2.yml] -ignore=BA108,BA109,IN124 +ignore=BA108,BA109 [file:README.md] ignore=RM106 diff --git a/Packs/AWS-EC2/Integrations/AWS-EC2/AWS-EC2.py b/Packs/AWS-EC2/Integrations/AWS-EC2/AWS-EC2.py index e285c6c5b719..a2e9b5eb7437 100644 --- a/Packs/AWS-EC2/Integrations/AWS-EC2/AWS-EC2.py +++ b/Packs/AWS-EC2/Integrations/AWS-EC2/AWS-EC2.py @@ -3076,6 +3076,10 @@ def main(): demisto.debug(f'Command being called is {command}') + if (ROLE_NAME and not IS_ARN_PROVIDED): + support_multithreading() + demisto.debug('using multiple accounts') + match command: case 'test-module': return_results(test_module()) diff --git a/Packs/AWS-EC2/Integrations/AWS-EC2/AWS-EC2.yml b/Packs/AWS-EC2/Integrations/AWS-EC2/AWS-EC2.yml index ce756547d9e0..5d353fe7f7d0 100644 --- a/Packs/AWS-EC2/Integrations/AWS-EC2/AWS-EC2.yml +++ b/Packs/AWS-EC2/Integrations/AWS-EC2/AWS-EC2.yml @@ -4130,7 +4130,7 @@ script: type: String description: Creates a VPC endpoint. name: aws-ec2-create-vpc-endpoint - dockerimage: demisto/boto3py3:1.0.0.91323 + dockerimage: demisto/boto3py3:1.0.0.100468 runonce: false script: '-' subtype: python3 diff --git a/Packs/AWS-EC2/ReleaseNotes/1_4_10.md b/Packs/AWS-EC2/ReleaseNotes/1_4_10.md new file mode 100644 index 000000000000..fca47899a223 --- /dev/null +++ b/Packs/AWS-EC2/ReleaseNotes/1_4_10.md @@ -0,0 +1,7 @@ + +#### Integrations + +##### AWS - EC2 + +- Fixed an issue where running commands on a large amount of accounts would result in a timeout. +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100294*. \ No newline at end of file diff --git a/Packs/AWS-EC2/ReleaseNotes/1_4_11.md b/Packs/AWS-EC2/ReleaseNotes/1_4_11.md new file mode 100644 index 000000000000..3770ce9c2500 --- /dev/null +++ b/Packs/AWS-EC2/ReleaseNotes/1_4_11.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - EC2 + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. 
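
> Editor's note on the AWS-EC2 change above: the new `support_multithreading()` guard in `main()` only activates when a role name is configured without an explicit ARN, i.e. when one command is fanned out across many member accounts, which is the timeout scenario mentioned in release note 1_4_10. The sketch below is illustrative only, not the integration's actual code; `ACCOUNT_IDS`, `ROLE_NAME`, and the helper names are hypothetical.

```python
# Illustrative sketch -- not the AWS-EC2 integration's implementation.
# Fans one DescribeInstances call out per account with a thread pool, the kind
# of concurrent workload the support_multithreading() guard is meant to enable.
from concurrent.futures import ThreadPoolExecutor

import boto3

ACCOUNT_IDS = ["111111111111", "222222222222"]  # hypothetical member accounts
ROLE_NAME = "xsoar-ec2-readonly"                # hypothetical role present in each account


def assume_role_session(account_id: str) -> boto3.Session:
    """Assume ROLE_NAME in the given account and return a boto3 session for it."""
    creds = boto3.client("sts").assume_role(
        RoleArn=f"arn:aws:iam::{account_id}:role/{ROLE_NAME}",
        RoleSessionName="xsoar-ec2",
    )["Credentials"]
    return boto3.Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    )


def describe_instances(account_id: str) -> dict:
    """Run DescribeInstances against a single account."""
    return assume_role_session(account_id).client("ec2").describe_instances()


# Running accounts concurrently rather than sequentially is what avoids the
# timeout described in release note 1_4_10 when many accounts are configured.
with ThreadPoolExecutor(max_workers=10) as pool:
    results = dict(zip(ACCOUNT_IDS, pool.map(describe_instances, ACCOUNT_IDS)))
```
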
diff --git a/Packs/AWS-EC2/ReleaseNotes/1_4_8.md b/Packs/AWS-EC2/ReleaseNotes/1_4_8.md new file mode 100644 index 000000000000..dda3f96d4327 --- /dev/null +++ b/Packs/AWS-EC2/ReleaseNotes/1_4_8.md @@ -0,0 +1,3 @@ +## AWS - EC2 + +- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release. \ No newline at end of file diff --git a/Packs/AWS-EC2/ReleaseNotes/1_4_9.md b/Packs/AWS-EC2/ReleaseNotes/1_4_9.md new file mode 100644 index 000000000000..30414d94d223 --- /dev/null +++ b/Packs/AWS-EC2/ReleaseNotes/1_4_9.md @@ -0,0 +1,5 @@ +#### Scripts + +##### AwsEC2SyncAccounts + +- Documentation and metadata improvements. \ No newline at end of file diff --git a/Packs/AWS-EC2/Scripts/AwsEC2SyncAccounts/AwsEC2SyncAccounts.py b/Packs/AWS-EC2/Scripts/AwsEC2SyncAccounts/AwsEC2SyncAccounts.py index 1bf1dce9e2bf..a0e6b38941fa 100644 --- a/Packs/AWS-EC2/Scripts/AwsEC2SyncAccounts/AwsEC2SyncAccounts.py +++ b/Packs/AWS-EC2/Scripts/AwsEC2SyncAccounts/AwsEC2SyncAccounts.py @@ -108,6 +108,8 @@ def update_ec2_instance(account_ids: list[str], ec2_instance_name: str) -> str: return f'Successfully updated ***{ec2_instance_name}*** with accounts:' except StopIteration: raise DemistoException(f'AWS - EC2 instance {ec2_instance_name!r} was not found or is not an AWS - EC2 instance.') + except (TypeError, KeyError) as e: + raise DemistoException(f'Please make sure a "Core REST API" instance is enabled.\nError: {e}') except Exception as e: raise DemistoException(f'Unexpected error while configuring AWS - EC2 instance with accounts {accounts_as_str!r}:\n{e}') diff --git a/Packs/AWS-EC2/pack_metadata.json b/Packs/AWS-EC2/pack_metadata.json index d3b5ad596722..1c22348814ad 100644 --- a/Packs/AWS-EC2/pack_metadata.json +++ b/Packs/AWS-EC2/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - EC2", "description": "Amazon Web Services Elastic Compute Cloud (EC2)", "support": "xsoar", - "currentVersion": "1.4.7", + "currentVersion": "1.4.11", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-EKS/Integrations/AWSEKS/AWSEKS.yml b/Packs/AWS-EKS/Integrations/AWSEKS/AWSEKS.yml index 41aab58ae8b0..a538e577f212 100644 --- a/Packs/AWS-EKS/Integrations/AWSEKS/AWSEKS.yml +++ b/Packs/AWS-EKS/Integrations/AWSEKS/AWSEKS.yml @@ -538,7 +538,7 @@ script: script: '-' type: python subtype: python3 - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.98661 fromversion: 6.9.0 tests: - AWSEKS-Test diff --git a/Packs/AWS-EKS/ReleaseNotes/1_0_3.md b/Packs/AWS-EKS/ReleaseNotes/1_0_3.md new file mode 100644 index 000000000000..37c8fffb45e8 --- /dev/null +++ b/Packs/AWS-EKS/ReleaseNotes/1_0_3.md @@ -0,0 +1,3 @@ +#### Integrations +##### AWS-EKS +- Updated the Docker image to: *demisto/boto3py3:1.0.0.98661*. 
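
> Editor's note on the `AwsEC2SyncAccounts.py` hunk above: the new `except (TypeError, KeyError)` branch exists because a missing "Core REST API" instance leaves the internal API response unusable, and the script now surfaces that as a clear `DemistoException` instead of a raw traceback. Below is a hedged, self-contained sketch of the same error-translation pattern; the `find_rest_api_instance` helper and its input shape are hypothetical, not the script's real code.

```python
# Illustrative sketch of the error-translation pattern, not AwsEC2SyncAccounts.py itself.


class DemistoException(Exception):
    """Stand-in for the XSOAR CommonServerPython exception class."""


def find_rest_api_instance(modules: dict | None) -> str:
    """Return the name of an active 'Core REST API' instance.

    Translates the low-level TypeError/KeyError raised when `modules` is
    missing or malformed into an actionable message for the analyst.
    """
    try:
        return next(
            name
            for name, module in modules.items()       # TypeError if modules is None
            if module["brand"] == "Core REST API"      # KeyError if the shape is wrong
            and module["state"] == "active"
        )
    except StopIteration:
        raise DemistoException('No active "Core REST API" instance was found.')
    except (TypeError, KeyError) as e:
        raise DemistoException(f'Please make sure a "Core REST API" instance is enabled.\nError: {e}')
```
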
diff --git a/Packs/AWS-EKS/pack_metadata.json b/Packs/AWS-EKS/pack_metadata.json index 718302130736..207de2fcdf42 100644 --- a/Packs/AWS-EKS/pack_metadata.json +++ b/Packs/AWS-EKS/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - EKS", "description": "The AWS EKS integration allows for the management and operation of Amazon Elastic Kubernetes Service (EKS) clusters.", "support": "xsoar", - "currentVersion": "1.0.2", + "currentVersion": "1.0.3", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Enrichment.yml b/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Enrichment.yml index 08498cd60ebe..af2c46cd67ff 100644 --- a/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Enrichment.yml +++ b/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Enrichment.yml @@ -53,7 +53,7 @@ tasks: { "position": { "x": 210, - "y": 1730 + "y": 2170 } } note: false @@ -78,7 +78,7 @@ tasks: brand: AWS - EC2 nexttasks: '#none#': - - "2" + - "25" scriptarguments: groupIds: complex: @@ -702,8 +702,8 @@ tasks: view: |- { "position": { - "x": 880, - "y": 1370 + "x": 940, + "y": 1350 } } "24": @@ -714,7 +714,7 @@ tasks: isoversize: false nexttasks: '#none#': - - "2" + - "25" note: false quietmode: 0 scriptarguments: @@ -742,7 +742,111 @@ tasks: { "position": { "x": 880, - "y": 1550 + "y": 1610 + } + } + "25": + conditions: + - condition: + - - left: + iscontext: true + value: + complex: + filters: + - - left: + iscontext: true + value: + simple: modules.brand + operator: isEqualString + right: + value: + simple: AWS - System Manager + - - left: + iscontext: true + value: + simple: modules.state + operator: isEqualString + right: + value: + simple: active + root: modules + operator: isExists + right: + value: {} + label: "yes" + continueonerrortype: "" + id: "25" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#default#': + - "2" + "yes": + - "26" + note: false + quietmode: 0 + separatecontext: false + skipunavailable: false + task: + brand: "" + description: Determines if the AWS - Systems Manager integration instance is configured. + id: 25c42040-ec5b-4942-85c9-50aa659a4842 + iscommand: false + name: Is AWS - Systems Manager enabled? + type: condition + version: -1 + taskid: 25c42040-ec5b-4942-85c9-50aa659a4842 + timertriggers: [] + type: condition + view: |- + { + "position": { + "x": 470, + "y": 1820 + } + } + "26": + continueonerrortype: "" + id: "26" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#none#': + - "2" + note: false + quietmode: 0 + scriptarguments: + instance_id: + simple: ${AWS.EC2.Instances.InstanceId} + region: + simple: ${AWS.EC2.Instances.Region} + roleArn: + simple: ${AssumeRoleArn} + roleSessionName: + simple: AWS-SSM-Command + type_name: + simple: Instance Information + separatecontext: false + skipunavailable: false + task: + brand: AWS - System Manager + description: A list of inventory items returned by the request. + id: 47310a34-97d6-406b-884c-6c98a5855f45 + iscommand: true + name: Get Instance ID information from SSM Inventory list. 
+ script: AWS - System Manager|||aws-ssm-inventory-entry-list + type: regular + version: -1 + taskid: 47310a34-97d6-406b-884c-6c98a5855f45 + timertriggers: [] + type: regular + view: |- + { + "position": { + "x": 480, + "y": 2000 } } view: |- @@ -753,12 +857,11 @@ view: |- "13_16_#default#": 0.35, "15_16_#default#": 0.22, "18_16_#default#": 0.35, - "20_21_#default#": 0.31, - "23_2_#default#": 0.33 + "20_21_#default#": 0.31 }, "paper": { "dimensions": { - "height": 2575, + "height": 3015, "width": 1190, "x": 210, "y": -780 @@ -766,37 +869,37 @@ view: |- } } inputs: -- key: "" +- key: "AwsIP" + value: + complex: + accessor: remoteip + root: alert + required: true + description: AWS IP address in the alert. + playbookInputQuery: +- key: AWSAssumeRoleName value: {} required: false - description: "" + description: If assuming roles for AWS, this is the name of the role to assume (should be the same for all organizations). + playbookInputQuery: +- description: "" + key: "" playbookInputQuery: - query: "" - queryEntity: indicators - results: daterange: fromdate: "0001-01-01T00:00:00Z" - todate: "0001-01-01T00:00:00Z" + fromdatelicenseval: "0001-01-01T00:00:00Z" period: by: "" - byto: "" byfrom: "" - tovalue: - fromvalue: + byto: "" field: "" - fromdatelicenseval: "0001-01-01T00:00:00Z" + fromvalue: + tovalue: + todate: "0001-01-01T00:00:00Z" + query: "" + queryEntity: indicators + results: runFromLastJobTime: true -- key: AwsIP - value: - complex: - root: alert - accessor: remoteip - required: true - description: AWS IP in alert - playbookInputQuery: -- description: If assuming roles for AWS, this is the name of the role to assume (should be the same for all organizations). - key: AWSAssumeRoleName - playbookInputQuery: required: false value: {} outputs: @@ -809,7 +912,25 @@ outputs: - contextPath: AWSHierarchy description: AWS account hierarchy information. type: unknown -quiet: true +- contextPath: AWS.SSM + description: AWS SSM information. + type: unknown fromversion: 6.5.0 tests: - No tests (auto formatted) +contentitemexportablefields: + contentitemfields: {} +inputSections: +- description: Generic group for inputs. + inputs: + - AwsIP + - AWSAssumeRoleName + name: General (Inputs group) +outputSections: +- description: Generic group for outputs. + name: General (Outputs group) + outputs: + - AWS.EC2.Instances + - AWS.EC2.SecurityGroups + - AWSHierarchy + - AWS.SSM diff --git a/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Enrichment_README.md b/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Enrichment_README.md index 1825dceebc86..70b7602b9ad8 100644 --- a/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Enrichment_README.md +++ b/Packs/AWS-Enrichment-Remediation/Playbooks/AWS_-_Enrichment_README.md @@ -11,19 +11,21 @@ This playbook does not use any sub-playbooks. ### Integrations * AWS - EC2 +* AWS - System Manager ### Scripts -* AWSAccountHierarchy * Set +* AWSAccountHierarchy ### Commands -* aws-ec2-describe-ipam-resource-discoveries +* aws-ec2-describe-instances * aws-ec2-describe-security-groups -* aws-ec2-get-ipam-discovered-public-addresses * aws-ec2-describe-regions -* aws-ec2-describe-instances +* aws-ec2-get-ipam-discovered-public-addresses +* aws-ssm-inventory-entry-list +* aws-ec2-describe-ipam-resource-discoveries ## Playbook Inputs @@ -31,9 +33,9 @@ This playbook does not use any sub-playbooks. 
| **Name** | **Description** | **Default Value** | **Required** | | --- | --- | --- | --- | -| Indicator Query | Indicators matching the indicator query will be used as playbook input | | Optional | | AwsIP | AWS IP in alert | alert.remoteip | Required | | AWSAssumeRoleName | If assuming roles for AWS, this is the name of the role to assume \(should be the same for all organizations\). | | Optional | +| Indicator Query | Indicators matching the indicator query will be used as playbook input. | | Optional | ## Playbook Outputs @@ -44,6 +46,7 @@ This playbook does not use any sub-playbooks. | AWS.EC2.Instances | AWS EC2 information. | unknown | | AWS.EC2.SecurityGroups | AWS Security group information. | unknown | | AWSHierarchy | AWS account hierarchy information. | unknown | +| AWS.SSM | AWS SSM information. | unknown | ## Playbook Image diff --git a/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Deployment_-_AWS.yml b/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Deployment_-_AWS.yml new file mode 100644 index 000000000000..3ed9ab9e1df6 --- /dev/null +++ b/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Deployment_-_AWS.yml @@ -0,0 +1,1820 @@ +id: Function Deployment - AWS +version: -1 +name: Function Deployment - AWS +description: |- + This playbook automates the deployment of an AWS Lambda function to manage resources within an Amazon EKS cluster. It ensures that all necessary configurations are created, updated, and verified. + + ### Setup + + - **Describe EKS Cluster**: Gather essential details of the EKS cluster. + - **Create IAM Role**: Set up a new IAM role for the Lambda function. + - **Create and Attach Policy**: Define and attach a policy to the IAM role to grant necessary permissions. + + ### Authentication Mode Check + + - **Verify Authentication Mode**: Ensure the current authentication mode allows API access. + - **If not**: Update the cluster authentication mode to permit API access. + + ### Access Entry Configuration + + - **Create Access Entry**: Establish a new access entry in the EKS cluster. + - **Associate Access Policy**: Link the access policy with the created access entry. + - **Update Access Entry**: Apply the latest configurations to the access entry. + + ### VPC and Security Group Setup + + - **Describe VPCs**: Identify the appropriate VPC for the Lambda function. + - **Create Security Group**: Define a security group to manage Lambda function traffic. + - **Set Ingress Rules**: Configure ingress rules for the security group. + + ### VPC Endpoint Creation + + - **Create VPC Endpoint for eks-auth**: Establish a VPC endpoint for EKS authentication. + - **Check for Errors**: Verify if there are any errors during the creation of the VPC endpoint. + - **If errors**: Handle and log them. + - **Verify VPC Endpoint Existence**: Ensure the VPC endpoint already exists. + - **If exists**: Proceed with the next steps. + + ### Lambda Function Deployment + + - **Download Kubernetes Library**: Fetch the necessary Kubernetes library. + - **Publish AWS Lambda Layer**: Publish a new layer version for the AWS Lambda function. + - **Create Lambda Code**: Develop the Lambda function code. + - **Zip Lambda Code**: Compress the Lambda function code for deployment. + - **Create AWS Lambda Function**: Deploy the Lambda function using the zipped code. + + ### Resolution + + - **Final Verification**: Ensure all operations have been successfully completed. 
+ - **Completion**: Confirm the deployment process is finished, ensuring robust management of EKS authentication through AWS Lambda. + + This playbook provides a comprehensive, automated approach to deploying an AWS Lambda function for managing resources within an EKS cluster, efficiently handling all configurations and potential errors. + + ### Required Integration + + #### AWS IAM (Identity and Access Management) + - [AWS IAM API Documentation](https://docs.aws.amazon.com/IAM/latest/APIReference/Welcome.html) + - [Cortex XSOAR AWS IAM Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSIAM/) + + #### AWS EC2 (Elastic Compute Cloud) + - [AWS EC2 API Documentation](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Welcome.html) + - [Cortex XSOAR AWS EC2 Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSEC2/) + + #### AWS EKS (Elastic Kubernetes Service) + - [AWS EKS API Documentation](https://docs.aws.amazon.com/eks/latest/APIReference/Welcome.html) + - [Cortex XSOAR AWS EKS Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSEKS/) + + #### AWS Lambda + - [AWS Lambda API Documentation](https://docs.aws.amazon.com/lambda/latest/dg/API_Reference.html) + - [Cortex XSOAR AWS Lambda Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSLambda/). +starttaskid: "0" +tasks: + "0": + id: "0" + taskid: 54dd5378-1342-4d82-84fa-99649b7926f7 + type: start + task: + id: 54dd5378-1342-4d82-84fa-99649b7926f7 + version: -1 + name: "" + iscommand: false + brand: "" + description: '' + nexttasks: + '#none#': + - "1" + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 40 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "1": + id: "1" + taskid: 2fa1b9d8-7711-49e3-80cd-e572b487aebf + type: regular + task: + id: 2fa1b9d8-7711-49e3-80cd-e572b487aebf + version: -1 + name: EKS - Describe Cluster + description: Describes an Amazon EKS cluster. + script: '|||aws-eks-describe-cluster' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "2" + scriptarguments: + cluster_name: + simple: ${inputs.ClusterName} + region: + simple: ${inputs.region} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 170 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "2": + id: "2" + taskid: 935b9483-60e9-44e2-8571-358a764a0fb3 + type: regular + task: + id: 935b9483-60e9-44e2-8571-358a764a0fb3 + version: -1 + name: Create a role for the Lambda function + description: Creates a new role for your AWS account. 
+ script: '|||aws-iam-create-role' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "3" + scriptarguments: + assumeRolePolicyDocument: + simple: |- + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + } + roleName: + simple: ${inputs.LambdaRoleName} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 340 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "3": + id: "3" + taskid: b647b62d-4c70-47d1-8fe5-6551ccf7b334 + type: regular + task: + id: b647b62d-4c70-47d1-8fe5-6551ccf7b334 + version: -1 + name: Create policy + description: Creates a new managed policy for your AWS account. This operation creates a policy version with a version identifier of v1 and sets v1 as the policy's default version. + script: '|||aws-iam-create-policy' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "4" + scriptarguments: + policyDocument: + simple: |- + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "eks:*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeNetworkInterfaces", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeInstances", + "ec2:AttachNetworkInterface" + ], + "Resource": "*" + } + ] + } + policyName: + simple: ${inputs.LambdaPolicyName} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 500 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "4": + id: "4" + taskid: 2cef0fd3-6d20-4f8a-8e7d-3fa27d1638ae + type: regular + task: + id: 2cef0fd3-6d20-4f8a-8e7d-3fa27d1638ae + version: -1 + name: Attach the policy to the Lambda role + description: Attaches the specified managed policy to the specified IAM Entity. + script: '|||aws-iam-attach-policy' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "11" + scriptarguments: + entityName: + simple: ${AWS.IAM.Roles.RoleName} + policyArn: + simple: ${AWS.IAM.Policies.Arn} + type: + simple: Role + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 660 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "5": + id: "5" + taskid: a59535eb-3371-4fa5-8777-14a41c7bf807 + type: regular + task: + id: a59535eb-3371-4fa5-8777-14a41c7bf807 + version: -1 + name: EKS - Create access entry + description: Creates an access entry. 
+ script: '|||aws-eks-create-access-entry' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "6" + scriptarguments: + cluster_name: + simple: ${inputs.ClusterName} + principal_arn: + simple: ${AWS.IAM.Roles.Arn} + region: + simple: ${inputs.region} + retry-count: + simple: "1" + retry-interval: + simple: "5" + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 1160 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "6": + id: "6" + taskid: 5e11c0d3-f12f-43f6-884b-1b07e5b580ae + type: regular + task: + id: 5e11c0d3-f12f-43f6-884b-1b07e5b580ae + version: -1 + name: EKS - Associate access policy + description: Describes an Amazon EKS cluster. + script: '|||aws-eks-associate-access-policy' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "7" + scriptarguments: + cluster_name: + simple: ${inputs.ClusterName} + policy_arn: + simple: arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy + principal_arn: + simple: ${AWS.IAM.Roles.Arn} + region: + simple: ${inputs.region} + type: + simple: cluster + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 1320 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "7": + id: "7" + taskid: 61b4a2bb-6609-4701-8dd3-ba257660ab25 + type: regular + task: + id: 61b4a2bb-6609-4701-8dd3-ba257660ab25 + version: -1 + name: EKS - Update access entry + description: Updates an access entry. + script: '|||aws-eks-update-access-entry' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "18" + scriptarguments: + cluster_name: + simple: ${inputs.ClusterName} + kubernetes_groups: + simple: cluster-admin + principal_arn: + simple: ${AWS.IAM.Roles.Arn} + region: + simple: ${inputs.region} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 1480 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "8": + id: "8" + taskid: 45528df7-bb7f-42c5-8bd0-761a893853fb + type: regular + task: + id: 45528df7-bb7f-42c5-8bd0-761a893853fb + version: -1 + name: AWS Lambda - Publish layer version + description: Creates an Lambda layer from a ZIP archive. 
+ script: '|||aws-lambda-publish-layer-version' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "24" + scriptarguments: + compatible-architectures: + simple: ${inputs.LayerArchitecture} + compatible-runtimes: + simple: ${inputs.LayerRuntime} + layer-name: + simple: ${inputs.LayerName} + region: + simple: ${inputs.region} + zip-file: + complex: + root: File + filters: + - - operator: containsString + left: + value: + simple: File.Name + iscontext: true + right: + value: + simple: inputs.LibraryName + iscontext: true + ignorecase: true + accessor: EntryID + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 2900 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "10": + id: "10" + taskid: a13603f8-4bba-4f72-8d38-d02255a1e429 + type: title + task: + id: a13603f8-4bba-4f72-8d38-d02255a1e429 + version: -1 + name: Done + type: title + iscommand: false + brand: "" + description: '' + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 3710 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "11": + id: "11" + taskid: 634c042a-b835-496e-83a9-37cde721742a + type: condition + task: + id: 634c042a-b835-496e-83a9-37cde721742a + version: -1 + name: Check if the authentication mode allows API + description: Checks if the cluster allows authentication via API. + type: condition + iscommand: false + brand: "" + nexttasks: + '#default#': + - "12" + "yes": + - "5" + separatecontext: false + conditions: + - label: "yes" + condition: + - - operator: containsGeneral + left: + value: + simple: AWS.EKS.DescribeCluster.accessConfig.authenticationMode + iscontext: true + right: + value: + simple: API + ignorecase: true + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 820 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "12": + id: "12" + taskid: 7fb729bd-cda7-4087-8e76-f68a2ec88100 + type: regular + task: + id: 7fb729bd-cda7-4087-8e76-f68a2ec88100 + version: -1 + name: Update cluster authentication mode + description: 'Updates an Amazon EKS cluster configuration. Only one type of update can be allowed. Potentially harmful: once the authentication mode was updated to ''API'' it is irreversible.' + script: '|||aws-eks-update-cluster-config' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "5" + scriptarguments: + authentication_mode: + simple: API_AND_CONFIG_MAP + cluster_name: + simple: ${inputs.ClusterName} + region: + simple: ${inputs.region} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 70, + "y": 990 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "13": + id: "13" + taskid: fb201ae1-4187-4978-8190-6aee638abd62 + type: regular + task: + id: fb201ae1-4187-4978-8190-6aee638abd62 + version: -1 + name: Download Kubernetes library + description: The script installs a Python library using pip and archive it. 
+ scriptName: DownloadAndArchivePythonLibrary + type: regular + iscommand: false + brand: "" + nexttasks: + '#none#': + - "8" + scriptarguments: + library_name: + simple: ${inputs.LibraryName} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 2740 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "14": + id: "14" + taskid: 2c183d15-aaf8-499f-8922-9df6eab414ca + type: regular + task: + id: 2c183d15-aaf8-499f-8922-9df6eab414ca + version: -1 + name: Create the Lambda code + description: | + Creates a file (using the given data input or entry ID) and uploads it to the current investigation War Room. + scriptName: FileCreateAndUploadV2 + type: regular + iscommand: false + brand: "" + nexttasks: + '#none#': + - "15" + scriptarguments: + data: + simple: ${inputs.FunctionCode} + filename: + simple: lambda_function.py + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 3190 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "15": + id: "15" + taskid: 0399941e-47ee-459e-8aa7-60c150b1a1c6 + type: regular + task: + id: 0399941e-47ee-459e-8aa7-60c150b1a1c6 + version: -1 + name: Zip Lambda code + description: Zip a file and upload to war room + scriptName: ZipFile + type: regular + iscommand: false + brand: "" + nexttasks: + '#none#': + - "23" + scriptarguments: + entryID: + complex: + root: File + filters: + - - operator: isEqualString + left: + value: + simple: File.Name + iscontext: true + right: + value: + simple: lambda_function.py + ignorecase: true + accessor: EntryID + zipName: + simple: code + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 3360 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "16": + id: "16" + taskid: fedbbd4d-58b3-4e54-84f1-29b233743f5a + type: regular + task: + id: fedbbd4d-58b3-4e54-84f1-29b233743f5a + version: -1 + name: Create VPC endpoint for eks-auth + description: Creates a VPC endpoint. + script: '|||aws-ec2-create-vpc-endpoint' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "34" + scriptarguments: + region: + simple: ${inputs.region} + securityGroupIds: + simple: ${AWS.EKS.DescribeCluster.resourcesVpcConfig.securityGroupIds} + serviceName: + simple: com.amazonaws.${inputs.region}.eks-auth + subnetIds: + simple: ${AWS.EKS.DescribeCluster.resourcesVpcConfig.subnetIds} + vpcEndpointType: + simple: Interface + vpcId: + simple: ${AWS.EC2.Vpcs.VpcId} + separatecontext: false + continueonerror: true + continueonerrortype: "" + view: |- + { + "position": { + "x": 200, + "y": 2180 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "18": + id: "18" + taskid: 2d82ee8b-6bcd-4d34-88c0-f8b1d9ceb10b + type: regular + task: + id: 2d82ee8b-6bcd-4d34-88c0-f8b1d9ceb10b + version: -1 + name: Describe VPCs + description: Describes one or more of your VPCs. 
+ script: '|||aws-ec2-describe-vpcs' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "19" + scriptarguments: + region: + simple: ${inputs.region} + vpcIds: + simple: ${AWS.EKS.DescribeCluster.resourcesVpcConfig.vpcId} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 1645 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "19": + id: "19" + taskid: 19380d04-9017-4578-85d1-6539b235c361 + type: regular + task: + id: 19380d04-9017-4578-85d1-6539b235c361 + version: -1 + name: Create security group + description: Creates a security group. + script: '|||aws-ec2-create-security-group' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "20" + scriptarguments: + description: + simple: This security group allow the response function to communicate with the cluster + groupName: + simple: ${inputs.SecurityGroupName} + region: + simple: ${inputs.region} + vpcId: + simple: ${AWS.EC2.Vpcs.VpcId} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 1810 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "20": + id: "20" + taskid: b3871919-30b5-4e02-8fcd-fecaeaead4e8 + type: regular + task: + id: b3871919-30b5-4e02-8fcd-fecaeaead4e8 + version: -1 + name: Create an ingress rule for the security group + description: Adds ingress rule to a security group. + script: '|||aws-ec2-authorize-security-group-ingress-rule' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "16" + - "22" + scriptarguments: + cidrIp: + simple: ${AWS.EC2.Vpcs.CidrBlock} + fromPort: + simple: "443" + groupId: + simple: ${AWS.EC2.SecurityGroups.GroupId} + ipProtocol: + simple: tcp + region: + simple: ${inputs.region} + toPort: + simple: "443" + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 1980 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "22": + id: "22" + taskid: 7a865610-19c6-415a-89d1-ae1d9f558f10 + type: regular + task: + id: 7a865610-19c6-415a-89d1-ae1d9f558f10 + version: -1 + name: Create VPC endpoint for eks-auth + description: Creates a VPC endpoint. + script: '|||aws-ec2-create-vpc-endpoint' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "31" + scriptarguments: + region: + simple: ${inputs.region} + securityGroupIds: + simple: ${AWS.EKS.DescribeCluster.resourcesVpcConfig.securityGroupIds} + serviceName: + simple: com.amazonaws.${inputs.region}.eks + subnetIds: + simple: ${AWS.EKS.DescribeCluster.resourcesVpcConfig.subnetIds} + vpcEndpointType: + simple: Interface + vpcId: + simple: ${AWS.EC2.Vpcs.VpcId} + separatecontext: false + continueonerror: true + continueonerrortype: "" + view: |- + { + "position": { + "x": 700, + "y": 2180 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "23": + id: "23" + taskid: 25d80d46-e3c7-4fdc-8ad4-67d04e22dfd5 + type: regular + task: + id: 25d80d46-e3c7-4fdc-8ad4-67d04e22dfd5 + version: -1 + name: AWS Lambda - Create function + description: Creates a Lambda function. 
To create a function, you need a deployment package and an execution role. + script: '|||aws-lambda-create-function' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "10" + scriptarguments: + code: + complex: + root: File + filters: + - - operator: isEqualString + left: + value: + simple: File.Name + iscontext: true + right: + value: + simple: code.zip + ignorecase: true + accessor: EntryID + functionName: + simple: ${inputs.FunctionName} + handler: + simple: ${inputs.FunctionHandler} + layers: + simple: ${AWS.Lambda.Layers.LayerVersionArn} + packageType: + simple: Zip + region: + simple: ${inputs.region} + role: + simple: ${AWS.IAM.Roles.Arn} + runtime: + simple: ${inputs.FunctionRuntime} + vpcConfig: + complex: + root: AWS.EKS.DescribeCluster + accessor: resourcesVpcConfig + transformers: + - operator: IgnoreFieldsFromJson + args: + fields: + value: + simple: clusterSecurityGroupId,endpointPrivateAccess,endpointPublicAccess,publicAccessCidrs,vpcId + json_object: {} + - operator: Stringify + - operator: replace + args: + limit: {} + replaceWith: + value: + simple: SecurityGroupIds + toReplace: + value: + simple: securityGroupIds + - operator: replace + args: + limit: {} + replaceWith: + value: + simple: SubnetIds + toReplace: + value: + simple: subnetIds + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 3530 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "24": + id: "24" + taskid: 0d583674-cc5b-4a9d-8398-e5649245ac0e + type: title + task: + id: 0d583674-cc5b-4a9d-8398-e5649245ac0e + version: -1 + name: Create Function + type: title + iscommand: false + brand: "" + description: '' + nexttasks: + '#none#': + - "14" + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 3060 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "31": + id: "31" + taskid: f00e38d5-c3f5-4d1d-8642-34550bad0aab + type: regular + task: + id: f00e38d5-c3f5-4d1d-8642-34550bad0aab + version: -1 + name: Check the error returned for eks-auth VPC endpoint creation + description: Get the error(s) associated with a given entry/entries. Use ${lastCompletedTaskEntries} to check the previous task entries. The automation will return an array of the error contents from those entries. + scriptName: GetErrorsFromEntry + type: regular + iscommand: false + brand: "" + nexttasks: + '#none#': + - "32" + scriptarguments: + entry_id: + simple: ${lastCompletedTaskEntries} + extend-context: + simple: ErrorEntries.eksAuth= + ignore-outputs: + simple: "true" + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 700, + "y": 2350 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "32": + id: "32" + taskid: 55fce217-fa07-432c-8bf7-8e327e33f763 + type: condition + task: + id: 55fce217-fa07-432c-8bf7-8e327e33f763 + version: -1 + name: Check if the VPC endpoint already exists + description: Checks if a VPC endpoint for eks-auth already exists. 
+ type: condition + iscommand: false + brand: "" + nexttasks: + '#default#': + - "33" + "yes": + - "13" + separatecontext: false + conditions: + - label: "yes" + condition: + - - operator: containsString + left: + value: + simple: ErrorEntries.eksAuth + iscontext: true + right: + value: + simple: conflicting + continueonerrortype: "" + view: |- + { + "position": { + "x": 700, + "y": 2520 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "33": + id: "33" + taskid: ee2a8892-22db-46c2-8fdd-4dd5271a8529 + type: regular + task: + id: ee2a8892-22db-46c2-8fdd-4dd5271a8529 + version: -1 + name: VPC endpoint for eks-auth creation error + description: Prints an error entry with a given message. + scriptName: PrintErrorEntry + type: regular + iscommand: false + brand: "" + nexttasks: + '#none#': + - "10" + scriptarguments: + message: + simple: |- + Unable to create a VPC endpoint for eks-auth due to the following error: + + ${ErrorEntries.eksAuth} + separatecontext: false + continueonerror: true + continueonerrortype: "" + view: |- + { + "position": { + "x": 1070, + "y": 2740 + } + } + note: false + timertriggers: [] + ignoreworker: false + fieldMapping: + - incidentfield: Error Message + output: + simple: VPC Endpoint Creation Failed + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "34": + id: "34" + taskid: b4b59d4a-77f1-49cc-8852-0c8fbe92178d + type: regular + task: + id: b4b59d4a-77f1-49cc-8852-0c8fbe92178d + version: -1 + name: Check the error returned for eks VPC endpoint creation + description: Get the error(s) associated with a given entry/entries. Use ${lastCompletedTaskEntries} to check the previous task entries. The automation will return an array of the error contents from those entries. + scriptName: GetErrorsFromEntry + type: regular + iscommand: false + brand: "" + nexttasks: + '#none#': + - "35" + scriptarguments: + entry_id: + simple: ${lastCompletedTaskEntries} + extend-context: + simple: ErrorEntries.eks= + ignore-outputs: + simple: "true" + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 200, + "y": 2350 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "35": + id: "35" + taskid: 81bbd239-7d66-4dba-8916-574494b0fdb0 + type: condition + task: + id: 81bbd239-7d66-4dba-8916-574494b0fdb0 + version: -1 + name: Check if the VPC endpoint already exists + description: Checks if a VPC endpoint for eks already exists. + type: condition + iscommand: false + brand: "" + nexttasks: + '#default#': + - "36" + "yes": + - "13" + separatecontext: false + conditions: + - label: "yes" + condition: + - - operator: containsString + left: + value: + simple: ErrorEntries.eks + iscontext: true + right: + value: + simple: conflicting + continueonerrortype: "" + view: |- + { + "position": { + "x": 200, + "y": 2520 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "36": + id: "36" + taskid: d1ccb111-1357-494a-8c5b-314bb9a13c19 + type: regular + task: + id: d1ccb111-1357-494a-8c5b-314bb9a13c19 + version: -1 + name: VPC endpoint for eks creation error + description: Prints an error entry with a given message. 
+ scriptName: PrintErrorEntry + type: regular + iscommand: false + brand: "" + nexttasks: + '#none#': + - "10" + scriptarguments: + message: + simple: |- + Unable to create a VPC endpoint for eks due to the following error: + + ${ErrorEntries.eks} + separatecontext: false + continueonerror: true + continueonerrortype: "" + view: |- + { + "position": { + "x": -180, + "y": 2740 + } + } + note: false + timertriggers: [] + ignoreworker: false + fieldMapping: + - incidentfield: Error Message + output: + simple: VPC Endpoint Creation Failed + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false +view: |- + { + "linkLabelsPosition": { + "11_5_yes": 0.17, + "32_13_yes": 0.38, + "35_13_yes": 0.43 + }, + "paper": { + "dimensions": { + "height": 3735, + "width": 1630, + "x": -180, + "y": 40 + } + } + } +inputs: +- key: ClusterName + value: {} + required: false + description: The name of the cluster. + playbookInputQuery: +- key: region + value: {} + required: false + description: The region of the cluster. + playbookInputQuery: +- key: FunctionName + value: {} + required: false + description: The name of the Lambda function. + playbookInputQuery: +- key: FunctionCode + value: {} + required: false + description: Entry ID of the uploaded base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you. + playbookInputQuery: +- key: FunctionRuntime + value: {} + required: false + description: "The runtime environment for the function.\t" + playbookInputQuery: +- key: FunctionHandler + value: {} + required: false + description: "The name of the method within your code that Lambda calls to execute your function.\t" + playbookInputQuery: +- key: LayerName + value: {} + required: false + description: The name to assign to the new Lambda layer. + playbookInputQuery: +- key: LayerRuntime + value: {} + required: false + description: "The name of the method within your code that Lambda calls to execute your function.\t" + playbookInputQuery: +- key: LayerArchitecture + value: {} + required: false + description: "A list of compatible architectures.\t" + playbookInputQuery: +- key: LibraryName + value: {} + required: false + description: "The Python library to download and attach to the Lambda Function. \ne.g.\nTo be able to use the kubernetes library, use 'kubernetes' and the script will download and pack its files." + playbookInputQuery: +- key: LambdaRoleName + value: {} + required: false + description: The lambda role name to create. + playbookInputQuery: +- key: LambdaPolicyName + value: {} + required: false + description: The lambda policy name to create. + playbookInputQuery: +- key: SecurityGroupName + value: {} + required: false + description: The security group name to create. + playbookInputQuery: +inputSections: +- inputs: + - ClusterName + - region + name: General (Inputs group) + description: Generic group for inputs +- inputs: + - FunctionName + - FunctionCode + - FunctionRuntime + - FunctionHandler + - LayerName + - LayerRuntime + - LayerArchitecture + - LibraryName + name: FunctionParameters + description: This group holds inputs related to the function creation. +- inputs: + - LambdaRoleName + - LambdaPolicyName + name: IAMParameters + description: This group holds inputs related to IAM. +- inputs: + - SecurityGroupName + name: EC2Parameters + description: This group holds inputs related to EC2. 
+outputSections: +- outputs: + - AWS.EKS.DescribeCluster.name + - AWS.EKS.DescribeCluster.arn + - AWS.EKS.DescribeCluster.createdAt + - AWS.EKS.DescribeCluster.version + - AWS.EKS.DescribeCluster.endpoint + - AWS.EKS.DescribeCluster.roleArn + - AWS.EKS.DescribeCluster.identity + - AWS.EKS.DescribeCluster.status + - AWS.EKS.DescribeCluster.clientRequestToken + - AWS.EKS.DescribeCluster.platformVersion + - AWS.EKS.DescribeCluster.tags + - AWS.EKS.DescribeCluster.id + - AWS.EKS.DescribeCluster.resourcesVpcConfig.subnetIds + - AWS.EKS.DescribeCluster.resourcesVpcConfig.securityGroupIds + - AWS.EKS.DescribeCluster.resourcesVpcConfig.clusterSecurityGroupId + - AWS.EKS.DescribeCluster.resourcesVpcConfig.vpcId + - AWS.EKS.DescribeCluster.resourcesVpcConfig.endpointPublicAccess + - AWS.EKS.DescribeCluster.resourcesVpcConfig.endpointPrivateAccess + - AWS.EKS.DescribeCluster.resourcesVpcConfig.publicAccessCidrs + - AWS.EKS.DescribeCluster.kubernetesNetworkConfig.serviceIpv4Cidr + - AWS.EKS.DescribeCluster.kubernetesNetworkConfig.serviceIpv6Cidr + - AWS.EKS.DescribeCluster.kubernetesNetworkConfig.ipFamily + - AWS.EKS.DescribeCluster.logging.clusterLogging + - AWS.EKS.DescribeCluster.certificateAuthority.data + - AWS.EKS.DescribeCluster.encryptionConfig.resources + - AWS.EKS.DescribeCluster.encryptionConfig.provider + - AWS.EKS.DescribeCluster.connectorConfig.activationId + - AWS.EKS.DescribeCluster.connectorConfig.activationCode + - AWS.EKS.DescribeCluster.connectorConfig.activationExpiry + - AWS.EKS.DescribeCluster.connectorConfig.provider + - AWS.EKS.DescribeCluster.connectorConfig.roleArn + - AWS.EKS.DescribeCluster.health.issues + - AWS.EKS.DescribeCluster.outpostConfig.outpostArns + - AWS.EKS.DescribeCluster.outpostConfig.controlPlaneInstanceType + - AWS.EKS.DescribeCluster.outpostConfig.controlPlanePlacement + - AWS.EKS.DescribeCluster.accessConfig.bootstrapClusterCreatorAdminPermissions + - AWS.EKS.DescribeCluster.accessConfig.authenticationMode + - AWS.IAM.Roles.RoleName + - AWS.IAM.Roles.RoleId + - AWS.IAM.Roles.Arn + - AWS.IAM.Roles.CreateDate + - AWS.IAM.Roles.Path + - AWS.IAM.Roles.AssumeRolePolicyDocument + - AWS.IAM.Roles.Description + - AWS.IAM.Roles.MaxSessionDuration + - AWS.IAM.Policies.PolicyName + - AWS.IAM.Policies.PolicyId + - AWS.IAM.Policies.Arn + - AWS.IAM.Policies.Path + - AWS.IAM.Policies.DefaultVersionId + - AWS.IAM.Policies.Description + - AWS.IAM.Policies.CreateDate + - AWS.IAM.Policies.UpdateDate + - AWS.EKS.CreateAccessEntry.clusterName + - AWS.EKS.CreateAccessEntry.principalArn + - AWS.EKS.CreateAccessEntry.kubernetesGroups + - AWS.EKS.CreateAccessEntry.accessEntryArn + - AWS.EKS.CreateAccessEntry.createdAt + - AWS.EKS.CreateAccessEntry.modifiedAt + - AWS.EKS.CreateAccessEntry.tags + - AWS.EKS.CreateAccessEntry.username + - AWS.EKS.CreateAccessEntry.type + - AWS.EKS.AssociatedAccessPolicy.clusterName + - AWS.EKS.AssociatedAccessPolicy.principalArn + - AWS.EKS.AssociatedAccessPolicy.policyArn + - AWS.EKS.AssociatedAccessPolicy.associatedAt + - AWS.EKS.AssociatedAccessPolicy.modifiedAt + - AWS.EKS.AssociatedAccessPolicy.accessScope.type + - AWS.EKS.AssociatedAccessPolicy.accessScope.namespaces + - AWS.EKS.UpdateAccessEntry.clusterName + - AWS.EKS.UpdateAccessEntry.principalArn + - AWS.EKS.UpdateAccessEntry.kubernetesGroups + - AWS.EKS.UpdateAccessEntry.accessEntryArn + - AWS.EKS.UpdateAccessEntry.createdAt + - AWS.EKS.UpdateAccessEntry.modifiedAt + - AWS.EKS.UpdateAccessEntry.tags + - AWS.EKS.UpdateAccessEntry.username + - AWS.EKS.UpdateAccessEntry.type + - 
AWS.Lambda.Layers.LayerVersionArn + - AWS.Lambda.Layers.LayerArn + - AWS.Lambda.Layers.Description + - AWS.Lambda.Layers.CreatedDate + - AWS.Lambda.Layers.Version + - AWS.Lambda.Layers.CompatibleRuntimes + - AWS.Lambda.Layers.CompatibleArchitectures + - AWS.Lambda.Functions.FunctionName + - AWS.Lambda.Functions.FunctionArn + - AWS.Lambda.Functions.Runtime + - AWS.Lambda.Functions.Role + - AWS.Lambda.Functions.Handler + - AWS.Lambda.Functions.Description + - AWS.Lambda.Functions.PackageType + - AWS.Lambda.Functions.LastModified + - AWS.Lambda.Functions.Timeout + - AWS.Lambda.Functions.VpcConfig.SubnetIds + - AWS.Lambda.Functions.VpcConfig.SecurityGroupIds + - AWS.Lambda.Functions.VpcConfig.VpcId + - AWS.Lambda.Functions.VpcConfig.Ipv6AllowedForDualStack + - AWS.EKS.UpdateCluster.clusterName + - AWS.EKS.UpdateCluster.id + - AWS.EKS.UpdateCluster.status + - AWS.EKS.UpdateCluster.type + - AWS.EKS.UpdateCluster.params + - AWS.EKS.UpdateCluster.createdAt + - File.Name + - File.EntryID + - File.Type + - File.Extension + - File.SHA256 + - ZipFile.ZippedFile + - AWS.EC2.Vpcs.VpcEndpoint.VpcEndpointId + - AWS.EC2.Vpcs.VpcEndpoint.State + - AWS.EC2.Vpcs.VpcEndpoint.ServiceName + - AWS.EC2.Vpcs.VpcEndpoint.VpcId + - AWS.EC2.Vpcs.VpcEndpoint.EndpointType + - AWS.EC2.Vpcs.VpcId + - AWS.EC2.Vpcs.AccountId + - AWS.EC2.Vpcs.State + - AWS.EC2.Vpcs.CidrBlock + - AWS.EC2.Vpcs.Tags.Key + - AWS.EC2.Vpcs.Tags.Value + - AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.AssociationId + - AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.Ipv6CidrBlock + - AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.Ipv6CidrBlockState.State + - AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.Ipv6CidrBlockState.StatusMessage + - AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.AssociationId + - AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.CidrBlock + - AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.CidrBlockState.State + - AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.CidrBlockState.StatusMessage + - AWS.EC2.SecurityGroups.GroupName + - AWS.EC2.SecurityGroups.Description + - AWS.EC2.SecurityGroups.VpcId + - AWS.EC2.SecurityGroups.GroupId + - AWS.EC2.SecurityGroups.AccountId + - ErrorEntries + name: General (Outputs group) + description: Generic group for outputs +outputs: +- contextPath: AWS.EKS.DescribeCluster.name + description: The name of your cluster. +- contextPath: AWS.EKS.DescribeCluster.arn + description: The Amazon Resource Name (ARN) of the cluster. +- contextPath: AWS.EKS.DescribeCluster.createdAt + description: The creation date of the object. +- contextPath: AWS.EKS.DescribeCluster.version + description: The Kubernetes server version for the cluster. +- contextPath: AWS.EKS.DescribeCluster.endpoint + description: The endpoint for your Kubernetes API server. +- contextPath: AWS.EKS.DescribeCluster.roleArn + description: The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to Amazon Web Services API operations on your behalf. +- contextPath: AWS.EKS.DescribeCluster.identity + description: The identity provider information for the cluster. +- contextPath: AWS.EKS.DescribeCluster.status + description: The current status of the cluster. +- contextPath: AWS.EKS.DescribeCluster.clientRequestToken + description: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. +- contextPath: AWS.EKS.DescribeCluster.platformVersion + description: The platform version of your Amazon EKS cluster. 
+- contextPath: AWS.EKS.DescribeCluster.tags + description: A dictionary containing metadata for categorization and organization. +- contextPath: AWS.EKS.DescribeCluster.id + description: The ID of your local Amazon EKS cluster on an Amazon Web Services Outpost. +- contextPath: AWS.EKS.DescribeCluster.resourcesVpcConfig.subnetIds + description: The subnets associated with your cluster. +- contextPath: AWS.EKS.DescribeCluster.resourcesVpcConfig.securityGroupIds + description: The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your nodes and the Kubernetes control plane. +- contextPath: AWS.EKS.DescribeCluster.resourcesVpcConfig.clusterSecurityGroupId + description: The cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. +- contextPath: AWS.EKS.DescribeCluster.resourcesVpcConfig.vpcId + description: The VPC associated with your cluster. +- contextPath: AWS.EKS.DescribeCluster.resourcesVpcConfig.endpointPublicAccess + description: Whether the public API server endpoint is enabled. +- contextPath: AWS.EKS.DescribeCluster.resourcesVpcConfig.endpointPrivateAccess + description: This parameter indicates whether the Amazon EKS private API server endpoint is enabled. +- contextPath: AWS.EKS.DescribeCluster.resourcesVpcConfig.publicAccessCidrs + description: The CIDR blocks that are allowed access to your cluster’s public Kubernetes API server endpoint. +- contextPath: AWS.EKS.DescribeCluster.kubernetesNetworkConfig.serviceIpv4Cidr + description: The CIDR block that Kubernetes Pod and Service object IP addresses are assigned from. +- contextPath: AWS.EKS.DescribeCluster.kubernetesNetworkConfig.serviceIpv6Cidr + description: The CIDR block that Kubernetes Pod and Service IP addresses are assigned from if you created a 1.21 or later cluster with version 1.10.1 or later of the Amazon VPC CNI add-on and specified ipv6 for ipFamily when you created the cluster. +- contextPath: AWS.EKS.DescribeCluster.kubernetesNetworkConfig.ipFamily + description: The IP family used to assign Kubernetes Pod and Service objects IP addresses. +- contextPath: AWS.EKS.DescribeCluster.logging.clusterLogging + description: The cluster control plane logging configuration for your cluster. +- contextPath: AWS.EKS.DescribeCluster.certificateAuthority.data + description: The Base64-encoded certificate data required to communicate with your cluster. +- contextPath: AWS.EKS.DescribeCluster.encryptionConfig.resources + description: Specifies the resources to be encrypted. The only supported value is secrets. +- contextPath: AWS.EKS.DescribeCluster.encryptionConfig.provider + description: Key Management Service (KMS) key. +- contextPath: AWS.EKS.DescribeCluster.connectorConfig.activationId + description: A unique ID associated with the cluster for registration purposes. +- contextPath: AWS.EKS.DescribeCluster.connectorConfig.activationCode + description: A unique code associated with the cluster for registration purposes. +- contextPath: AWS.EKS.DescribeCluster.connectorConfig.activationExpiry + description: The expiration time of the connected cluster. +- contextPath: AWS.EKS.DescribeCluster.connectorConfig.provider + description: The cluster’s cloud service provider. 
+- contextPath: AWS.EKS.DescribeCluster.connectorConfig.roleArn
+  description: The Amazon Resource Name (ARN) of the role to communicate with services from the connected Kubernetes cluster.
+- contextPath: AWS.EKS.DescribeCluster.health.issues
+  description: An object representing the health issues of your local Amazon EKS cluster on an Amazon Web Services Outpost.
+- contextPath: AWS.EKS.DescribeCluster.outpostConfig.outpostArns
+  description: An object representing the configuration of your local Amazon EKS cluster on an Amazon Web Services Outpost.
+- contextPath: AWS.EKS.DescribeCluster.outpostConfig.controlPlaneInstanceType
+  description: The Amazon EC2 instance type used for the control plane.
+- contextPath: AWS.EKS.DescribeCluster.outpostConfig.controlPlanePlacement
+  description: An object representing the placement configuration for all the control plane instances of your local Amazon EKS cluster on an Amazon Web Services Outpost.
+- contextPath: AWS.EKS.DescribeCluster.accessConfig.bootstrapClusterCreatorAdminPermissions
+  description: Specifies whether or not the cluster creator IAM principal was set as a cluster admin access entry during cluster creation time.
+- contextPath: AWS.EKS.DescribeCluster.accessConfig.authenticationMode
+  description: The current authentication mode of the cluster.
+- contextPath: AWS.IAM.Roles.RoleName
+  description: The friendly name that identifies the role.
+- contextPath: AWS.IAM.Roles.RoleId
+  description: The stable and unique string identifying the role.
+- contextPath: AWS.IAM.Roles.Arn
+  description: The Amazon Resource Name (ARN) specifying the role.
+- contextPath: AWS.IAM.Roles.CreateDate
+  description: The date and time when the role was created.
+- contextPath: AWS.IAM.Roles.Path
+  description: The path to the role.
+- contextPath: AWS.IAM.Roles.AssumeRolePolicyDocument
+  description: The policy that grants an entity permission to assume the role.
+- contextPath: AWS.IAM.Roles.Description
+  description: A description of the role that you provide.
+- contextPath: AWS.IAM.Roles.MaxSessionDuration
+  description: The maximum session duration (in seconds) for the specified role.
+- contextPath: AWS.IAM.Policies.PolicyName
+  description: The friendly name (not ARN) identifying the policy.
+- contextPath: AWS.IAM.Policies.PolicyId
+  description: The stable and unique string identifying the policy.
+- contextPath: AWS.IAM.Policies.Arn
+  description: The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
+- contextPath: AWS.IAM.Policies.Path
+  description: The path to the policy.
+- contextPath: AWS.IAM.Policies.DefaultVersionId
+  description: The identifier for the version of the policy that is set as the default version.
+- contextPath: AWS.IAM.Policies.Description
+  description: A friendly description of the policy.
+- contextPath: AWS.IAM.Policies.CreateDate
+  description: The date and time, in ISO 8601 date-time format, when the policy was created.
+- contextPath: AWS.IAM.Policies.UpdateDate
+  description: The date and time, in ISO 8601 date-time format, when the policy was last updated.
+- contextPath: AWS.EKS.CreateAccessEntry.clusterName
+  description: The name of the cluster.
+- contextPath: AWS.EKS.CreateAccessEntry.principalArn
+  description: The ARN of the IAM principal for the access entry.
+- contextPath: AWS.EKS.CreateAccessEntry.kubernetesGroups + description: A list of names that you’ve specified in a Kubernetes RoleBinding or ClusterRoleBinding object so that Kubernetes authorizes the principalARN access to cluster objects. +- contextPath: AWS.EKS.CreateAccessEntry.accessEntryArn + description: The ARN of the access entry. +- contextPath: AWS.EKS.CreateAccessEntry.createdAt + description: The creation date of the object. +- contextPath: AWS.EKS.CreateAccessEntry.modifiedAt + description: The date and time for the last modification to the object. +- contextPath: AWS.EKS.CreateAccessEntry.tags + description: A dictionary containing metadata for categorization and organization. +- contextPath: AWS.EKS.CreateAccessEntry.username + description: The name of a user that can authenticate to the cluster. +- contextPath: AWS.EKS.CreateAccessEntry.type + description: The type of the access entry. +- contextPath: AWS.EKS.AssociatedAccessPolicy.clusterName + description: The name of your cluster. +- contextPath: AWS.EKS.AssociatedAccessPolicy.principalArn + description: The ARN of the IAM principal for the AccessEntry. +- contextPath: AWS.EKS.AssociatedAccessPolicy.policyArn + description: The ARN of the AccessPolicy. +- contextPath: AWS.EKS.AssociatedAccessPolicy.associatedAt + description: The date and time the AccessPolicy was associated with an AccessEntry. +- contextPath: AWS.EKS.AssociatedAccessPolicy.modifiedAt + description: The date and time for the last modification to the object. +- contextPath: AWS.EKS.AssociatedAccessPolicy.accessScope.type + description: The scope type of an access policy. +- contextPath: AWS.EKS.AssociatedAccessPolicy.accessScope.namespaces + description: A Kubernetes namespace that an access policy is scoped to. +- contextPath: AWS.EKS.UpdateAccessEntry.clusterName + description: The name of your cluster. +- contextPath: AWS.EKS.UpdateAccessEntry.principalArn + description: The ARN of the IAM principal for the access entry. +- contextPath: AWS.EKS.UpdateAccessEntry.kubernetesGroups + description: A list of names that you’ve specified in a Kubernetes RoleBinding or ClusterRoleBinding object so that Kubernetes authorizes the principalARN access to cluster objects. +- contextPath: AWS.EKS.UpdateAccessEntry.accessEntryArn + description: The ARN of the access entry. +- contextPath: AWS.EKS.UpdateAccessEntry.createdAt + description: The creation date of the object. +- contextPath: AWS.EKS.UpdateAccessEntry.modifiedAt + description: The date and time for the last modification to the object. +- contextPath: AWS.EKS.UpdateAccessEntry.tags + description: Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. +- contextPath: AWS.EKS.UpdateAccessEntry.username + description: The name of a user that can authenticate to your cluster. +- contextPath: AWS.EKS.UpdateAccessEntry.type + description: The type of the access entry. +- contextPath: AWS.Lambda.Layers.LayerVersionArn + description: The ARN of the layer version. +- contextPath: AWS.Lambda.Layers.LayerArn + description: The ARN of the layer. +- contextPath: AWS.Lambda.Layers.Description + description: The description of the version. +- contextPath: AWS.Lambda.Layers.CreatedDate + description: The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD). +- contextPath: AWS.Lambda.Layers.Version + description: The version number. +- contextPath: AWS.Lambda.Layers.CompatibleRuntimes + description: The layer’s compatible runtimes. 
+- contextPath: AWS.Lambda.Layers.CompatibleArchitectures + description: The layer’s compatible architectures. + type: unknown +- contextPath: AWS.Lambda.Functions.FunctionName + description: The name of the function. +- contextPath: AWS.Lambda.Functions.FunctionArn + description: The function’s Amazon Resource Name (ARN). +- contextPath: AWS.Lambda.Functions.Runtime + description: The identifier of the function’s runtime. Runtime is required if the deployment package is a .zip file archive. +- contextPath: AWS.Lambda.Functions.Role + description: The function’s execution role. +- contextPath: AWS.Lambda.Functions.Handler + description: The function that Lambda calls to begin running your function. +- contextPath: AWS.Lambda.Functions.Description + description: The function’s description. +- contextPath: AWS.Lambda.Functions.PackageType + description: The type of deployment package. Set to Image for container image and set Zip for .zip file archive. +- contextPath: AWS.Lambda.Functions.LastModified + description: The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD). +- contextPath: AWS.Lambda.Functions.Timeout + description: The amount of time in seconds that Lambda allows a function to run before stopping it. +- contextPath: AWS.Lambda.Functions.VpcConfig.SubnetIds + description: A list of VPC subnet IDs. +- contextPath: AWS.Lambda.Functions.VpcConfig.SecurityGroupIds + description: A list of VPC security group IDs. +- contextPath: AWS.Lambda.Functions.VpcConfig.VpcId + description: The ID of the VPC. +- contextPath: AWS.Lambda.Functions.VpcConfig.Ipv6AllowedForDualStack + description: Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets. +- contextPath: AWS.EKS.UpdateCluster.clusterName + description: The name of the cluster. +- contextPath: AWS.EKS.UpdateCluster.id + description: The ID of the update. +- contextPath: AWS.EKS.UpdateCluster.status + description: The status of the update. +- contextPath: AWS.EKS.UpdateCluster.type + description: The type of the update. +- contextPath: AWS.EKS.UpdateCluster.params + description: The parameters of the update. +- contextPath: AWS.EKS.UpdateCluster.createdAt + description: The creation date of the object. +- contextPath: File.Name + description: The name of the file. + type: String +- contextPath: File.EntryID + description: The entry ID of the file. + type: String +- contextPath: File.Type + description: The file type. + type: String +- contextPath: File.Extension + description: The file extension. + type: String +- contextPath: File.SHA256 + description: The SHA256 hash of the file. + type: String +- contextPath: ZipFile.ZippedFile + description: The zipped file. + type: string +- contextPath: AWS.EC2.Vpcs.VpcEndpoint.VpcEndpointId + description: The ID of the endpoint. +- contextPath: AWS.EC2.Vpcs.VpcEndpoint.State + description: The state of the VPC endpoint. +- contextPath: AWS.EC2.Vpcs.VpcEndpoint.ServiceName + description: The service name of the VPC endpoint. +- contextPath: AWS.EC2.Vpcs.VpcEndpoint.VpcId + description: The ID of the VPC to which the endpoint is associated. +- contextPath: AWS.EC2.Vpcs.VpcEndpoint.EndpointType + description: The type of the VPC endpoint. +- contextPath: AWS.EC2.Vpcs.VpcId + description: The ID of the VPC. +- contextPath: AWS.EC2.Vpcs.AccountId + description: The ID of the AWS account with which the EC2 instance is associated. This key is only present when the parameter "AWS organization accounts" is provided. 
+- contextPath: AWS.EC2.Vpcs.State + description: The current state of the VPC. +- contextPath: AWS.EC2.Vpcs.CidrBlock + description: The primary IPv4 CIDR block for the VPC. +- contextPath: AWS.EC2.Vpcs.Tags.Key + description: The key of the tag. +- contextPath: AWS.EC2.Vpcs.Tags.Value + description: The value of the tag. +- contextPath: AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.AssociationId + description: The association ID for the IPv6 CIDR block. +- contextPath: AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.Ipv6CidrBlock + description: The IPv6 CIDR block. +- contextPath: AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.Ipv6CidrBlockState.State + description: The state of the CIDR block. +- contextPath: AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.Ipv6CidrBlockState.StatusMessage + description: A message about the status of the CIDR block, if applicable. +- contextPath: AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.AssociationId + description: The association ID for the IPv4 CIDR block. +- contextPath: AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.CidrBlock + description: The IPv4 CIDR block. +- contextPath: AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.CidrBlockState.State + description: The state of the CIDR block. +- contextPath: AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.CidrBlockState.StatusMessage + description: A message about the status of the CIDR block, if applicable. +- contextPath: AWS.EC2.SecurityGroups.GroupName + description: The name of the security group. +- contextPath: AWS.EC2.SecurityGroups.Description + description: A description for the security group. +- contextPath: AWS.EC2.SecurityGroups.VpcId + description: The ID of the VPC. +- contextPath: AWS.EC2.SecurityGroups.GroupId + description: The ID of the security group. +- contextPath: AWS.EC2.SecurityGroups.AccountId + description: The ID of the AWS account with which the EC2 instance is associated. This key is only present when the parameter "AWS organization accounts" is provided. +- contextPath: ErrorEntries + description: Contents of the errors associated with the entry/entries. +tests: +- No tests (auto formatted) +marketplaces: + - xsoar + - marketplacev2 +fromversion: 6.10.0 diff --git a/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Deployment_-_AWS_README.md b/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Deployment_-_AWS_README.md new file mode 100644 index 000000000000..46f9c53cdc02 --- /dev/null +++ b/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Deployment_-_AWS_README.md @@ -0,0 +1,270 @@ +This playbook automates the deployment of an AWS Lambda function to manage resources within an Amazon EKS cluster. It ensures that all necessary configurations are created, updated, and verified. + +### Setup + +- **Describe EKS Cluster**: Gather essential details of the EKS cluster. +- **Create IAM Role**: Set up a new IAM role for the Lambda function. +- **Create and Attach Policy**: Define and attach a policy to the IAM role to grant necessary permissions. + +### Authentication Mode Check + +- **Verify Authentication Mode**: Ensure the current authentication mode allows API access. + - **If not**: Update the cluster authentication mode to permit API access. + +### Access Entry Configuration + +- **Create Access Entry**: Establish a new access entry in the EKS cluster. +- **Associate Access Policy**: Link the access policy with the created access entry. +- **Update Access Entry**: Apply the latest configurations to the access entry. 
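+
+The setup, authentication-mode, and access-entry steps above map onto a handful of Amazon EKS API calls. The sketch below is a minimal boto3 illustration of that flow, not the playbook's actual implementation (the playbook drives the same operations through the `aws-eks-*` commands listed under Dependencies); the region, cluster name, principal ARN, and access-policy ARN are placeholder values.
+
+```python
+import boto3
+
+eks = boto3.client("eks", region_name="us-east-1")  # placeholder region
+
+cluster = "demo-cluster"                                            # placeholder cluster name
+principal_arn = "arn:aws:iam::111111111111:role/lambda-eks-role"    # placeholder role ARN
+
+# Authentication Mode Check: access entries require an API-enabled authentication mode.
+auth_mode = (
+    eks.describe_cluster(name=cluster)["cluster"]
+    .get("accessConfig", {})
+    .get("authenticationMode", "CONFIG_MAP")
+)
+if auth_mode == "CONFIG_MAP":
+    eks.update_cluster_config(
+        name=cluster,
+        accessConfig={"authenticationMode": "API_AND_CONFIG_MAP"},
+    )
+
+# Create Access Entry: register the IAM principal with the cluster.
+eks.create_access_entry(clusterName=cluster, principalArn=principal_arn, type="STANDARD")
+
+# Associate Access Policy: grant the principal cluster-scoped permissions (example policy ARN).
+eks.associate_access_policy(
+    clusterName=cluster,
+    principalArn=principal_arn,
+    policyArn="arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy",
+    accessScope={"type": "cluster"},
+)
+
+# Update Access Entry: adjust the entry's Kubernetes-facing settings if needed.
+eks.update_access_entry(clusterName=cluster, principalArn=principal_arn, username="lambda-admin")
+```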
+ +### VPC and Security Group Setup + +- **Describe VPCs**: Identify the appropriate VPC for the Lambda function. +- **Create Security Group**: Define a security group to manage Lambda function traffic. +- **Set Ingress Rules**: Configure ingress rules for the security group. + +### VPC Endpoint Creation + +- **Create VPC Endpoint for eks-auth**: Establish a VPC endpoint for EKS authentication. +- **Check for Errors**: Verify if there are any errors during the creation of the VPC endpoint. + - **If errors**: Handle and log them. +- **Verify VPC Endpoint Existence**: Ensure the VPC endpoint already exists. + - **If exists**: Proceed with the next steps. + +### Lambda Function Deployment + +- **Download Kubernetes Library**: Fetch the necessary Kubernetes library. +- **Publish AWS Lambda Layer**: Publish a new layer version for the AWS Lambda function. +- **Create Lambda Code**: Develop the Lambda function code. +- **Zip Lambda Code**: Compress the Lambda function code for deployment. +- **Create AWS Lambda Function**: Deploy the Lambda function using the zipped code. + +### Resolution + +- **Final Verification**: Ensure all operations have been successfully completed. +- **Completion**: Confirm the deployment process is finished, ensuring robust management of EKS authentication through AWS Lambda. + +This playbook provides a comprehensive, automated approach to deploying an AWS Lambda function for managing resources within an EKS cluster, efficiently handling all configurations and potential errors. + +### Required Integration + +#### AWS IAM (Identity and Access Management) +- [AWS IAM API Documentation](https://docs.aws.amazon.com/IAM/latest/APIReference/Welcome.html) +- [Cortex XSOAR AWS IAM Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSIAM/) + +#### AWS EC2 (Elastic Compute Cloud) +- [AWS EC2 API Documentation](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Welcome.html) +- [Cortex XSOAR AWS EC2 Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSEC2/) + +#### AWS EKS (Elastic Kubernetes Service) +- [AWS EKS API Documentation](https://docs.aws.amazon.com/eks/latest/APIReference/Welcome.html) +- [Cortex XSOAR AWS EKS Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSEKS/) + +#### AWS Lambda +- [AWS Lambda API Documentation](https://docs.aws.amazon.com/lambda/latest/dg/API_Reference.html) +- [Cortex XSOAR AWS Lambda Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSLambda/). + +## Dependencies + +This playbook uses the following sub-playbooks, integrations, and scripts. + +### Sub-playbooks + +This playbook does not use any sub-playbooks. + +### Integrations + +This playbook does not use any integrations. + +### Scripts + +* FileCreateAndUploadV2 +* GetErrorsFromEntry +* ZipFile +* DownloadAndArchivePythonLibrary +* PrintErrorEntry + +### Commands + +* aws-eks-describe-cluster +* aws-ec2-create-security-group +* aws-eks-associate-access-policy +* aws-ec2-create-vpc-endpoint +* aws-eks-update-cluster-config +* aws-iam-attach-policy +* aws-eks-update-access-entry +* aws-ec2-describe-vpcs +* aws-ec2-authorize-security-group-ingress-rule +* aws-lambda-publish-layer-version +* aws-eks-create-access-entry +* aws-lambda-create-function +* aws-iam-create-policy +* aws-iam-create-role + +## Playbook Inputs + +--- + +| **Name** | **Description** | **Default Value** | **Required** | +| --- | --- | --- | --- | +| ClusterName | The name of the cluster. | | Optional | +| region | The region of the cluster. 
| | Optional |
+| FunctionName | The name of the Lambda function. | | Optional |
+| FunctionCode | Entry ID of the uploaded base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you. | | Optional |
+| FunctionRuntime | The runtime environment for the function. | | Optional |
+| FunctionHandler | The name of the method within your code that Lambda calls to execute your function. | | Optional |
+| LayerName | The name to assign to the new Lambda layer. | | Optional |
+| LayerRuntime | The function runtime compatible with the new Lambda layer. | | Optional |
+| LayerArchitecture | A list of compatible architectures. | | Optional |
+| LibraryName | The Python library to download and attach to the Lambda Function.
e.g.
To be able to use the kubernetes library, use 'kubernetes' and the script will download and pack its files. | | Optional | +| LambdaRoleName | The lambda role name to create. | | Optional | +| LambdaPolicyName | The lambda policy name to create. | | Optional | +| SecurityGroupName | The security group name to create. | | Optional | + +## Playbook Outputs + +--- + +| **Path** | **Description** | **Type** | +| --- | --- | --- | +| AWS.EKS.DescribeCluster.name | The name of your cluster. | unknown | +| AWS.EKS.DescribeCluster.arn | The Amazon Resource Name \(ARN\) of the cluster. | unknown | +| AWS.EKS.DescribeCluster.createdAt | The creation date of the object. | unknown | +| AWS.EKS.DescribeCluster.version | The Kubernetes server version for the cluster. | unknown | +| AWS.EKS.DescribeCluster.endpoint | The endpoint for your Kubernetes API server. | unknown | +| AWS.EKS.DescribeCluster.roleArn | The Amazon Resource Name \(ARN\) of the IAM role that provides permissions for the Kubernetes control plane to make calls to Amazon Web Services API operations on your behalf. | unknown | +| AWS.EKS.DescribeCluster.identity | The identity provider information for the cluster. | unknown | +| AWS.EKS.DescribeCluster.status | The current status of the cluster. | unknown | +| AWS.EKS.DescribeCluster.clientRequestToken | A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. | unknown | +| AWS.EKS.DescribeCluster.platformVersion | The platform version of your Amazon EKS cluster. | unknown | +| AWS.EKS.DescribeCluster.tags | A dictionary containing metadata for categorization and organization. | unknown | +| AWS.EKS.DescribeCluster.id | The ID of your local Amazon EKS cluster on an Amazon Web Services Outpost. | unknown | +| AWS.EKS.DescribeCluster.resourcesVpcConfig.subnetIds | The subnets associated with your cluster. | unknown | +| AWS.EKS.DescribeCluster.resourcesVpcConfig.securityGroupIds | The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your nodes and the Kubernetes control plane. | unknown | +| AWS.EKS.DescribeCluster.resourcesVpcConfig.clusterSecurityGroupId | The cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. | unknown | +| AWS.EKS.DescribeCluster.resourcesVpcConfig.vpcId | The VPC associated with your cluster. | unknown | +| AWS.EKS.DescribeCluster.resourcesVpcConfig.endpointPublicAccess | Whether the public API server endpoint is enabled. | unknown | +| AWS.EKS.DescribeCluster.resourcesVpcConfig.endpointPrivateAccess | This parameter indicates whether the Amazon EKS private API server endpoint is enabled. | unknown | +| AWS.EKS.DescribeCluster.resourcesVpcConfig.publicAccessCidrs | The CIDR blocks that are allowed access to your cluster’s public Kubernetes API server endpoint. | unknown | +| AWS.EKS.DescribeCluster.kubernetesNetworkConfig.serviceIpv4Cidr | The CIDR block that Kubernetes Pod and Service object IP addresses are assigned from. | unknown | +| AWS.EKS.DescribeCluster.kubernetesNetworkConfig.serviceIpv6Cidr | The CIDR block that Kubernetes Pod and Service IP addresses are assigned from if you created a 1.21 or later cluster with version 1.10.1 or later of the Amazon VPC CNI add-on and specified ipv6 for ipFamily when you created the cluster. 
| unknown |
+| AWS.EKS.DescribeCluster.kubernetesNetworkConfig.ipFamily | The IP family used to assign Kubernetes Pod and Service objects IP addresses. | unknown |
+| AWS.EKS.DescribeCluster.logging.clusterLogging | The cluster control plane logging configuration for your cluster. | unknown |
+| AWS.EKS.DescribeCluster.certificateAuthority.data | The Base64-encoded certificate data required to communicate with your cluster. | unknown |
+| AWS.EKS.DescribeCluster.encryptionConfig.resources | Specifies the resources to be encrypted. The only supported value is secrets. | unknown |
+| AWS.EKS.DescribeCluster.encryptionConfig.provider | Key Management Service \(KMS\) key. | unknown |
+| AWS.EKS.DescribeCluster.connectorConfig.activationId | A unique ID associated with the cluster for registration purposes. | unknown |
+| AWS.EKS.DescribeCluster.connectorConfig.activationCode | A unique code associated with the cluster for registration purposes. | unknown |
+| AWS.EKS.DescribeCluster.connectorConfig.activationExpiry | The expiration time of the connected cluster. | unknown |
+| AWS.EKS.DescribeCluster.connectorConfig.provider | The cluster’s cloud service provider. | unknown |
+| AWS.EKS.DescribeCluster.connectorConfig.roleArn | The Amazon Resource Name \(ARN\) of the role to communicate with services from the connected Kubernetes cluster. | unknown |
+| AWS.EKS.DescribeCluster.health.issues | An object representing the health issues of your local Amazon EKS cluster on an Amazon Web Services Outpost. | unknown |
+| AWS.EKS.DescribeCluster.outpostConfig.outpostArns | An object representing the configuration of your local Amazon EKS cluster on an Amazon Web Services Outpost. | unknown |
+| AWS.EKS.DescribeCluster.outpostConfig.controlPlaneInstanceType | The Amazon EC2 instance type used for the control plane. | unknown |
+| AWS.EKS.DescribeCluster.outpostConfig.controlPlanePlacement | An object representing the placement configuration for all the control plane instances of your local Amazon EKS cluster on an Amazon Web Services Outpost. | unknown |
+| AWS.EKS.DescribeCluster.accessConfig.bootstrapClusterCreatorAdminPermissions | Specifies whether or not the cluster creator IAM principal was set as a cluster admin access entry during cluster creation time. | unknown |
+| AWS.EKS.DescribeCluster.accessConfig.authenticationMode | The current authentication mode of the cluster. | unknown |
+| AWS.IAM.Roles.RoleName | The friendly name that identifies the role. | unknown |
+| AWS.IAM.Roles.RoleId | The stable and unique string identifying the role. | unknown |
+| AWS.IAM.Roles.Arn | The Amazon Resource Name \(ARN\) specifying the role. | unknown |
+| AWS.IAM.Roles.CreateDate | The date and time when the role was created. | unknown |
+| AWS.IAM.Roles.Path | The path to the role. | unknown |
+| AWS.IAM.Roles.AssumeRolePolicyDocument | The policy that grants an entity permission to assume the role. | unknown |
+| AWS.IAM.Roles.Description | A description of the role that you provide. | unknown |
+| AWS.IAM.Roles.MaxSessionDuration | The maximum session duration \(in seconds\) for the specified role. | unknown |
+| AWS.IAM.Policies.PolicyName | The friendly name \(not ARN\) identifying the policy. | unknown |
+| AWS.IAM.Policies.PolicyId | The stable and unique string identifying the policy. | unknown |
+| AWS.IAM.Policies.Arn | The Amazon Resource Name \(ARN\). ARNs are unique identifiers for AWS resources. | unknown |
+| AWS.IAM.Policies.Path | The path to the policy.
| unknown | +| AWS.IAM.Policies.DefaultVersionId | The identifier for the version of the policy that is set as the default version. | unknown | +| AWS.IAM.Policies.Description | A friendly description of the policy. | unknown | +| AWS.IAM.Policies.CreateDate | The date and time, in ISO 8601 date-time format , when the policy was created. | unknown | +| AWS.IAM.Policies.UpdateDate | The date and time, in ISO 8601 date-time format , when the policy was last updated. | unknown | +| AWS.EKS.CreateAccessEntry.clusterName | The name of the cluster. | unknown | +| AWS.EKS.CreateAccessEntry.principalArn | The ARN of the IAM principal for the access entry. | unknown | +| AWS.EKS.CreateAccessEntry.kubernetesGroups | A list of names that you’ve specified in a Kubernetes RoleBinding or ClusterRoleBinding object so that Kubernetes authorizes the principalARN access to cluster objects. | unknown | +| AWS.EKS.CreateAccessEntry.accessEntryArn | The ARN of the access entry. | unknown | +| AWS.EKS.CreateAccessEntry.createdAt | The creation date of the object. | unknown | +| AWS.EKS.CreateAccessEntry.modifiedAt | The date and time for the last modification to the object. | unknown | +| AWS.EKS.CreateAccessEntry.tags | A dictionary containing metadata for categorization and organization. | unknown | +| AWS.EKS.CreateAccessEntry.username | The name of a user that can authenticate to the cluster. | unknown | +| AWS.EKS.CreateAccessEntry.type | The type of the access entry. | unknown | +| AWS.EKS.AssociatedAccessPolicy.clusterName | The name of your cluster. | unknown | +| AWS.EKS.AssociatedAccessPolicy.principalArn | The ARN of the IAM principal for the AccessEntry. | unknown | +| AWS.EKS.AssociatedAccessPolicy.policyArn | The ARN of the AccessPolicy. | unknown | +| AWS.EKS.AssociatedAccessPolicy.associatedAt | The date and time the AccessPolicy was associated with an AccessEntry. | unknown | +| AWS.EKS.AssociatedAccessPolicy.modifiedAt | The date and time for the last modification to the object. | unknown | +| AWS.EKS.AssociatedAccessPolicy.accessScope.type | The scope type of an access policy. | unknown | +| AWS.EKS.AssociatedAccessPolicy.accessScope.namespaces | A Kubernetes namespace that an access policy is scoped to. | unknown | +| AWS.EKS.UpdateAccessEntry.clusterName | The name of your cluster. | unknown | +| AWS.EKS.UpdateAccessEntry.principalArn | The ARN of the IAM principal for the access entry. | unknown | +| AWS.EKS.UpdateAccessEntry.kubernetesGroups | A list of names that you’ve specified in a Kubernetes RoleBinding or ClusterRoleBinding object so that Kubernetes authorizes the principalARN access to cluster objects. | unknown | +| AWS.EKS.UpdateAccessEntry.accessEntryArn | The ARN of the access entry. | unknown | +| AWS.EKS.UpdateAccessEntry.createdAt | The creation date of the object. | unknown | +| AWS.EKS.UpdateAccessEntry.modifiedAt | The date and time for the last modification to the object. | unknown | +| AWS.EKS.UpdateAccessEntry.tags | Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. | unknown | +| AWS.EKS.UpdateAccessEntry.username | The name of a user that can authenticate to your cluster. | unknown | +| AWS.EKS.UpdateAccessEntry.type | The type of the access entry. | unknown | +| AWS.Lambda.Layers.LayerVersionArn | The ARN of the layer version. | unknown | +| AWS.Lambda.Layers.LayerArn | The ARN of the layer. | unknown | +| AWS.Lambda.Layers.Description | The description of the version. 
| unknown | +| AWS.Lambda.Layers.CreatedDate | The date that the layer version was created, in ISO-8601 format \(YYYY-MM-DDThh:mm:ss.sTZD\). | unknown | +| AWS.Lambda.Layers.Version | The version number. | unknown | +| AWS.Lambda.Layers.CompatibleRuntimes | The layer’s compatible runtimes. | unknown | +| AWS.Lambda.Layers.CompatibleArchitectures | The layer’s compatible architectures. | unknown | +| AWS.Lambda.Functions.FunctionName | The name of the function. | unknown | +| AWS.Lambda.Functions.FunctionArn | The function’s Amazon Resource Name \(ARN\). | unknown | +| AWS.Lambda.Functions.Runtime | The identifier of the function’s runtime. Runtime is required if the deployment package is a .zip file archive. | unknown | +| AWS.Lambda.Functions.Role | The function’s execution role. | unknown | +| AWS.Lambda.Functions.Handler | The function that Lambda calls to begin running your function. | unknown | +| AWS.Lambda.Functions.Description | The function’s description. | unknown | +| AWS.Lambda.Functions.PackageType | The type of deployment package. Set to Image for container image and set Zip for .zip file archive. | unknown | +| AWS.Lambda.Functions.LastModified | The date and time that the function was last updated, in ISO-8601 format \(YYYY-MM-DDThh:mm:ss.sTZD\). | unknown | +| AWS.Lambda.Functions.Timeout | The amount of time in seconds that Lambda allows a function to run before stopping it. | unknown | +| AWS.Lambda.Functions.VpcConfig.SubnetIds | A list of VPC subnet IDs. | unknown | +| AWS.Lambda.Functions.VpcConfig.SecurityGroupIds | A list of VPC security group IDs. | unknown | +| AWS.Lambda.Functions.VpcConfig.VpcId | The ID of the VPC. | unknown | +| AWS.Lambda.Functions.VpcConfig.Ipv6AllowedForDualStack | Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets. | unknown | +| AWS.EKS.UpdateCluster.clusterName | The name of the cluster. | unknown | +| AWS.EKS.UpdateCluster.id | The ID of the update. | unknown | +| AWS.EKS.UpdateCluster.status | The status of the update. | unknown | +| AWS.EKS.UpdateCluster.type | The type of the update. | unknown | +| AWS.EKS.UpdateCluster.params | The parameters of the update. | unknown | +| AWS.EKS.UpdateCluster.createdAt | The creation date of the object. | unknown | +| File.Name | The name of the file. | String | +| File.EntryID | The entry ID of the file. | String | +| File.Type | The file type. | String | +| File.Extension | The file extension. | String | +| File.SHA256 | The SHA256 hash of the file. | String | +| ZipFile.ZippedFile | The zipped file. | string | +| AWS.EC2.Vpcs.VpcEndpoint.VpcEndpointId | The ID of the endpoint. | unknown | +| AWS.EC2.Vpcs.VpcEndpoint.State | The state of the VPC endpoint. | unknown | +| AWS.EC2.Vpcs.VpcEndpoint.ServiceName | The service name of the VPC endpoint. | unknown | +| AWS.EC2.Vpcs.VpcEndpoint.VpcId | The ID of the VPC to which the endpoint is associated. | unknown | +| AWS.EC2.Vpcs.VpcEndpoint.EndpointType | The type of the VPC endpoint. | unknown | +| AWS.EC2.Vpcs.VpcId | The ID of the VPC. | unknown | +| AWS.EC2.Vpcs.AccountId | The ID of the AWS account with which the EC2 instance is associated. This key is only present when the parameter "AWS organization accounts" is provided. | unknown | +| AWS.EC2.Vpcs.State | The current state of the VPC. | unknown | +| AWS.EC2.Vpcs.CidrBlock | The primary IPv4 CIDR block for the VPC. | unknown | +| AWS.EC2.Vpcs.Tags.Key | The key of the tag. | unknown | +| AWS.EC2.Vpcs.Tags.Value | The value of the tag. 
| unknown | +| AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.AssociationId | The association ID for the IPv6 CIDR block. | unknown | +| AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.Ipv6CidrBlock | The IPv6 CIDR block. | unknown | +| AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.Ipv6CidrBlockState.State | The state of the CIDR block. | unknown | +| AWS.EC2.Vpcs.Tags.Ipv6CidrBlockAssociationSet.Ipv6CidrBlockState.StatusMessage | A message about the status of the CIDR block, if applicable. | unknown | +| AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.AssociationId | The association ID for the IPv4 CIDR block. | unknown | +| AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.CidrBlock | The IPv4 CIDR block. | unknown | +| AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.CidrBlockState.State | The state of the CIDR block. | unknown | +| AWS.EC2.Vpcs.Tags.CidrBlockAssociationSet.CidrBlockState.StatusMessage | A message about the status of the CIDR block, if applicable. | unknown | +| AWS.EC2.SecurityGroups.GroupName | The name of the security group. | unknown | +| AWS.EC2.SecurityGroups.Description | A description for the security group. | unknown | +| AWS.EC2.SecurityGroups.VpcId | The ID of the VPC. | unknown | +| AWS.EC2.SecurityGroups.GroupId | The ID of the security group. | unknown | +| AWS.EC2.SecurityGroups.AccountId | The ID of the AWS account with which the EC2 instance is associated. This key is only present when the parameter "AWS organization accounts" is provided. | unknown | +| ErrorEntries | Contents of the errors associated with the entry/entries. | unknown | + +## Playbook Image + +--- + +![Function Deployment - AWS](../doc_files/Function_Deployment_-_AWS.png) diff --git a/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Removal_-_AWS.yml b/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Removal_-_AWS.yml new file mode 100644 index 000000000000..0de2a4818fd0 --- /dev/null +++ b/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Removal_-_AWS.yml @@ -0,0 +1,489 @@ +id: Function Removal - AWS +version: -1 +name: Function Removal - AWS +description: |- + This playbook automates the removal of an AWS Lambda function and its associated resources used for managing resources within an Amazon EKS cluster. It ensures all related roles, policies, and security configurations are properly detached and deleted. + + ### Resource Detachment and Deletion + + - **Get the Lambda Role**: Retrieve the IAM role associated with the Lambda function. + - **Detach Policy from Lambda Role**: Remove the policy attached to the Lambda role. + - **Delete IAM Role**: Delete the IAM role that was used for the Lambda function. + - **Delete Lambda Policy**: Remove the policy specifically created for the Lambda function. + - **Delete Security Group**: Delete the security group that was managing the Lambda function's traffic. + + ### Access Entry Check + + - **Check if Access Entry was Created**: Verify if the access entry for the EKS cluster was created. + - **If YES**: Proceed to delete additional resources. + - **If NO**: Skip the deletion of additional resources. + + ### Additional Resource Deletion + + - **Delete Kubernetes Layer**: Remove the Kubernetes layer that was used by the Lambda function. + - **Delete Lambda Function**: Delete the Lambda function itself, ensuring all related code and configurations are removed. + + ### Resolution + + - **Final Cleanup**: Ensure all specified resources have been deleted successfully. 
+ - **Completion**: Confirm that the removal process is complete, providing a clean environment free from the previously deployed Lambda function and its configurations. + + This playbook provides a comprehensive, automated approach to removing an AWS Lambda function and its related resources, ensuring all configurations and dependencies are properly managed and deleted. + + ### Required Integration + + #### AWS IAM (Identity and Access Management) + - [AWS IAM API Documentation](https://docs.aws.amazon.com/IAM/latest/APIReference/Welcome.html) + - [Cortex XSOAR AWS IAM Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSIAM/) + + #### AWS EC2 (Elastic Compute Cloud) + - [AWS EC2 API Documentation](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Welcome.html) + - [Cortex XSOAR AWS EC2 Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSEC2/) + + #### AWS Lambda + - [AWS Lambda API Documentation](https://docs.aws.amazon.com/lambda/latest/dg/API_Reference.html) + - [Cortex XSOAR AWS Lambda Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSLambda/). +starttaskid: "0" +tasks: + "0": + id: "0" + taskid: d93db492-de20-46e2-85a1-4ef9ba832775 + type: start + task: + id: d93db492-de20-46e2-85a1-4ef9ba832775 + version: -1 + name: "" + iscommand: false + brand: "" + description: '' + nexttasks: + '#none#': + - "12" + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": -110 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "2": + id: "2" + taskid: fdbcc382-ca58-4afa-83f0-7e728756d41a + type: regular + task: + id: fdbcc382-ca58-4afa-83f0-7e728756d41a + version: -1 + name: Delete an AWS IAM role + description: Deletes the specified role. The role must not have any policies attached. + script: '|||aws-iam-delete-role' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "3" + scriptarguments: + roleName: + simple: ${AWS.IAM.Roles.RoleName} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 340 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "3": + id: "3" + taskid: 7564f56e-3c0a-4b00-87ba-b788e24fa2d9 + type: regular + task: + id: 7564f56e-3c0a-4b00-87ba-b788e24fa2d9 + version: -1 + name: Delete the lambda policy + description: Deletes the specified managed policy. Before you can delete a managed policy, you must first detach the policy from all users, groups, and roles that it is attached to. In addition you must delete all the policy's versions. + script: '|||aws-iam-delete-policy' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "13" + scriptarguments: + policyArn: + simple: ${inputs.LambdaRolePolicyARN} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 500 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "8": + id: "8" + taskid: 0d04fbd9-0015-488f-8758-62d6be2e9cca + type: regular + task: + id: 0d04fbd9-0015-488f-8758-62d6be2e9cca + version: -1 + name: Delete the kubernetes layer + description: Deletes a version of an Lambda layer. 
+ script: '|||aws-lambda-delete-layer-version' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "9" + scriptarguments: + layer-name: + simple: ${inputs.LambdaLayerName} + region: + simple: ${inputs.region} + version-number: + simple: ${inputs.LambdaLayerVersion} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 1000 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "9": + id: "9" + taskid: 4a042a6e-cb90-41fc-8f3f-203cd0b71fcd + type: regular + task: + id: 4a042a6e-cb90-41fc-8f3f-203cd0b71fcd + version: -1 + name: Delete the lambda function + description: Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. + script: '|||aws-lambda-delete-function' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "10" + scriptarguments: + functionName: + simple: ${inputs.LambdaFunctionName} + region: + simple: ${inputs.region} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 1160 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "10": + id: "10" + taskid: 3b0700e8-cbd4-49ea-89a8-baab89e30a37 + type: title + task: + id: 3b0700e8-cbd4-49ea-89a8-baab89e30a37 + version: -1 + name: Done + type: title + iscommand: false + brand: "" + description: '' + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 1320 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "11": + id: "11" + taskid: cd61c4e6-55ae-479e-8be5-16ed088c9094 + type: regular + task: + id: cd61c4e6-55ae-479e-8be5-16ed088c9094 + version: -1 + name: Detach policy from the Lambda role + description: Removes the specified managed policy from the specified IAM Entity. + script: '|||aws-iam-detach-policy' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "2" + scriptarguments: + entityName: + simple: ${inputs.LambdaRoleName} + policyArn: + simple: ${inputs.LambdaRolePolicyARN} + retry-count: + simple: "3" + retry-interval: + simple: "5" + type: + simple: Role + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 180 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "12": + id: "12" + taskid: 391e8948-b824-4daf-867c-9765a1c2e2cb + type: regular + task: + id: 391e8948-b824-4daf-867c-9765a1c2e2cb + version: -1 + name: Get the Lambda role + description: Retrieves information about the specified role. 
+ script: '|||aws-iam-get-role' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "11" + scriptarguments: + roleName: + simple: ${inputs.LambdaRoleName} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 20 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "13": + id: "13" + taskid: 999be9e9-3f12-4d65-846a-a8d9b1698546 + type: regular + task: + id: 999be9e9-3f12-4d65-846a-a8d9b1698546 + version: -1 + name: Delete the security group + description: Deletes a security group. + script: '|||aws-ec2-delete-security-group' + type: regular + iscommand: true + brand: "" + nexttasks: + '#none#': + - "14" + scriptarguments: + groupId: + simple: ${inputs.SecurityGroupID} + region: + simple: ${inputs.region} + separatecontext: false + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 660 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false + "14": + id: "14" + taskid: d7292c20-05a4-4ecc-8cae-6527fa34f097 + type: condition + task: + id: d7292c20-05a4-4ecc-8cae-6527fa34f097 + version: -1 + name: Check if an access entry was created + description: Checks if an access entry was created for the Lambda role in the Function Deployment playbook. + type: condition + iscommand: false + brand: "" + nexttasks: + '#default#': + - "10" + "yes": + - "8" + separatecontext: false + conditions: + - label: "yes" + condition: + - - operator: isNotExists + left: + value: + simple: incident.errormessage + iscontext: true + continueonerrortype: "" + view: |- + { + "position": { + "x": 450, + "y": 820 + } + } + note: false + timertriggers: [] + ignoreworker: false + skipunavailable: false + quietmode: 0 + isoversize: false + isautoswitchedtoquietmode: false +view: |- + { + "linkLabelsPosition": { + "14_8_yes": 0.35 + }, + "paper": { + "dimensions": { + "height": 1495, + "width": 380, + "x": 450, + "y": -110 + } + } + } +inputs: +- key: region + value: {} + required: false + description: The region of the resource. + playbookInputQuery: +- key: LambdaFunctionName + value: {} + required: false + description: The Lambda function name. + playbookInputQuery: +- key: LambdaLayerVersion + value: {} + required: false + description: The Lambda layer version. + playbookInputQuery: +- key: LambdaLayerName + value: {} + required: false + description: The Lambda layer name. + playbookInputQuery: +- key: LambdaRoleName + value: {} + required: false + description: The lambda role name to delete. + playbookInputQuery: +- key: LambdaRolePolicyARN + value: {} + required: false + description: The ARN of the policy to delete. + playbookInputQuery: +- key: SecurityGroupID + value: {} + required: false + description: The security group ID. + playbookInputQuery: +inputSections: +- inputs: + - region + name: General (Inputs group) + description: Generic group for inputs +- inputs: + - LambdaFunctionName + - LambdaLayerVersion + - LambdaLayerName + name: FunctionParameters + description: This group holds inputs related to the function deletion. +- inputs: + - LambdaRoleName + - LambdaRolePolicyARN + name: IAMParameters + description: | + This group holds inputs related to IAM. +- inputs: + - SecurityGroupID + name: EC2Parameters + description: This group holds inputs related to EC2. 
+outputSections: +- outputs: [] + name: General (Outputs group) + description: Generic group for outputs +outputs: [] +tests: +- No tests (auto formatted) +marketplaces: + - xsoar + - marketplacev2 +fromversion: 6.10.0 diff --git a/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Removal_-_AWS_README.md b/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Removal_-_AWS_README.md new file mode 100644 index 000000000000..116c9c90d9e9 --- /dev/null +++ b/Packs/AWS-Enrichment-Remediation/Playbooks/playbook-Function_Removal_-_AWS_README.md @@ -0,0 +1,92 @@ +This playbook automates the removal of an AWS Lambda function and its associated resources used for managing resources within an Amazon EKS cluster. It ensures all related roles, policies, and security configurations are properly detached and deleted. + +### Resource Detachment and Deletion + +- **Get the Lambda Role**: Retrieve the IAM role associated with the Lambda function. +- **Detach Policy from Lambda Role**: Remove the policy attached to the Lambda role. +- **Delete IAM Role**: Delete the IAM role that was used for the Lambda function. +- **Delete Lambda Policy**: Remove the policy specifically created for the Lambda function. +- **Delete Security Group**: Delete the security group that was managing the Lambda function's traffic. + +### Access Entry Check + +- **Check if Access Entry was Created**: Verify if the access entry for the EKS cluster was created. + - **If YES**: Proceed to delete additional resources. + - **If NO**: Skip the deletion of additional resources. + +### Additional Resource Deletion + +- **Delete Kubernetes Layer**: Remove the Kubernetes layer that was used by the Lambda function. +- **Delete Lambda Function**: Delete the Lambda function itself, ensuring all related code and configurations are removed. + +### Resolution + +- **Final Cleanup**: Ensure all specified resources have been deleted successfully. +- **Completion**: Confirm that the removal process is complete, providing a clean environment free from the previously deployed Lambda function and its configurations. + +This playbook provides a comprehensive, automated approach to removing an AWS Lambda function and its related resources, ensuring all configurations and dependencies are properly managed and deleted. + +### Required Integration + +#### AWS IAM (Identity and Access Management) +- [AWS IAM API Documentation](https://docs.aws.amazon.com/IAM/latest/APIReference/Welcome.html) +- [Cortex XSOAR AWS IAM Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSIAM/) + +#### AWS EC2 (Elastic Compute Cloud) +- [AWS EC2 API Documentation](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Welcome.html) +- [Cortex XSOAR AWS EC2 Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSEC2/) + +#### AWS Lambda +- [AWS Lambda API Documentation](https://docs.aws.amazon.com/lambda/latest/dg/API_Reference.html) +- [Cortex XSOAR AWS Lambda Integration](https://cortex.marketplace.pan.dev/marketplace/details/AWSLambda/). + +## Dependencies + +This playbook uses the following sub-playbooks, integrations, and scripts. + +### Sub-playbooks + +This playbook does not use any sub-playbooks. + +### Integrations + +This playbook does not use any integrations. + +### Scripts + +This playbook does not use any scripts. 
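+
+For reference, the deletion order described above corresponds roughly to the following AWS API calls. This is a minimal boto3 sketch with placeholder names, ARNs, and IDs, not the playbook itself; the playbook performs these steps through the commands listed in the next section.
+
+```python
+import boto3
+
+iam = boto3.client("iam")
+ec2 = boto3.client("ec2", region_name="us-east-1")      # placeholder region
+lam = boto3.client("lambda", region_name="us-east-1")   # placeholder region
+
+role_name = "lambda-eks-role"                                       # placeholder role name
+policy_arn = "arn:aws:iam::111111111111:policy/lambda-eks-policy"   # placeholder policy ARN
+
+# Detach the policy from the Lambda role, then delete the role and the policy.
+iam.detach_role_policy(RoleName=role_name, PolicyArn=policy_arn)
+iam.delete_role(RoleName=role_name)
+iam.delete_policy(PolicyArn=policy_arn)
+
+# Delete the security group that managed the Lambda function's traffic.
+ec2.delete_security_group(GroupId="sg-0123456789abcdef0")  # placeholder group ID
+
+# Delete the Kubernetes layer version and the Lambda function itself.
+lam.delete_layer_version(LayerName="kubernetes-layer", VersionNumber=1)  # placeholder layer
+lam.delete_function(FunctionName="eks-auth-manager")                     # placeholder function
+```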
+ +### Commands + +* aws-iam-detach-policy +* aws-iam-get-role +* aws-iam-delete-policy +* aws-lambda-delete-layer-version +* aws-ec2-delete-security-group +* aws-lambda-delete-function +* aws-iam-delete-role + +## Playbook Inputs + +--- + +| **Name** | **Description** | **Default Value** | **Required** | +| --- | --- | --- | --- | +| region | The region of the resource. | | Optional | +| LambdaFunctionName | The Lambda function name. | | Optional | +| LambdaLayerVersion | The Lambda layer version. | | Optional | +| LambdaLayerName | The Lambda layer name. | | Optional | +| LambdaRoleName | The lambda role name to delete. | | Optional | +| LambdaRolePolicyARN | The ARN of the policy to delete. | | Optional | +| SecurityGroupID | The security group ID. | | Optional | + +## Playbook Outputs + +--- +There are no outputs for this playbook. + +## Playbook Image + +--- + +![Function Removal - AWS](../doc_files/Function_Removal_-_AWS.png) diff --git a/Packs/AWS-Enrichment-Remediation/README.md b/Packs/AWS-Enrichment-Remediation/README.md index 112964da2521..b7bf5dcbc422 100644 --- a/Packs/AWS-Enrichment-Remediation/README.md +++ b/Packs/AWS-Enrichment-Remediation/README.md @@ -1,12 +1,12 @@ ##### What does this pack do? The pack contains AWS playbooks that conduct enrichment and/or remediation and can use multiple other AWS content packs: -- Enrichment: Give an IP address, see if there is a EC2 instance associated and if so pull information on the security group associated. -- Remediation: Give the information collected from enrichment, replace the security group with a "quarantine" security group until vulnerabilities are resolved. +- Enrichment: Given an IP address, see if there is an associated EC2 instance and if so pull information on the associated security group. If an SSM agent is installed on the instance, pull information from the SSM agent. +- Remediation: Given the information collected from enrichment, replace the security group with a "quarantine" security group until vulnerabilities are resolved. - Unclaimed S3 Bucket Validation: The playbook sends a HTTP get response to the domain and validates the missing bucket information. - Unclaimed S3 Bucket Remediation: The playbook will create the unclaimed S3 bucket. -There are multiple AWS content packs for multiple AWS products (EC2, IAM, Route53, S3, etc). The intent was that users can install and use only the packs they need. However, if an AWS playbook uses multiple pack integrations (such as EC2, S3 and IAM), the integrations can't reside in one of the current packs because they include content from multiple pack integrations. This pack was created as a place to put AWS playbooks that use AWS integrations from multiple packs with a focus on enrichment and remediation. +There are multiple AWS content packs for multiple AWS products (EC2, IAM, Route53, S3, SSM, etc.). The intent was that users can install and use only the packs they need. However, if an AWS playbook uses multiple pack integrations (such as EC2, S3, SSM, and IAM), the integrations can't reside in one of the current packs because they include content from multiple pack integrations. This pack was created as a place to put AWS playbooks that use AWS integrations from multiple packs with a focus on enrichment and remediation. 
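As an illustration of the enrichment and remediation flow described above, the sketch below shows a rough boto3 equivalent: look up the EC2 instance behind an IP, read its attached security groups and SSM inventory record, then swap the groups for a "quarantine" group. It assumes a private-IP lookup and placeholder IDs; the pack's playbooks use the AWS-EC2, AWS_SystemManager, and related integrations rather than boto3 directly.

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")
ssm = boto3.client("ssm", region_name="us-east-1")

ip_address = "10.0.0.12"                 # the IP under investigation (example value)
quarantine_sg = "sg-0aaaabbbbccccdddd0"  # pre-created quarantine security group (example value)

# Enrichment: find the EC2 instance associated with the IP and its security groups.
reservations = ec2.describe_instances(
    Filters=[{"Name": "network-interface.addresses.private-ip-address", "Values": [ip_address]}]
)["Reservations"]
instance = reservations[0]["Instances"][0]
current_groups = [g["GroupId"] for g in instance["SecurityGroups"]]

# If an SSM agent is registered on the instance, pull its information record as well.
ssm_info = ssm.describe_instance_information(
    Filters=[{"Key": "InstanceIds", "Values": [instance["InstanceId"]]}]
)["InstanceInformationList"]

# Remediation: replace every attached security group with the quarantine group.
ec2.modify_instance_attribute(InstanceId=instance["InstanceId"], Groups=[quarantine_sg])
```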
### Playbooks diff --git a/Packs/AWS-Enrichment-Remediation/ReleaseNotes/1_1_18.md b/Packs/AWS-Enrichment-Remediation/ReleaseNotes/1_1_18.md new file mode 100644 index 000000000000..3ebd4384564c --- /dev/null +++ b/Packs/AWS-Enrichment-Remediation/ReleaseNotes/1_1_18.md @@ -0,0 +1,6 @@ + +#### Playbooks + +##### AWS - Enrichment + +Updated the playbook to enrich EC2 and IAM information using AWS SSM. diff --git a/Packs/AWS-Enrichment-Remediation/ReleaseNotes/1_1_19.md b/Packs/AWS-Enrichment-Remediation/ReleaseNotes/1_1_19.md new file mode 100644 index 000000000000..b055304714f3 --- /dev/null +++ b/Packs/AWS-Enrichment-Remediation/ReleaseNotes/1_1_19.md @@ -0,0 +1,10 @@ + +#### Playbooks + +##### New: Function Deployment - AWS + +- New: This playbook automates the deployment of an AWS Lambda function to manage resources within an Amazon EKS cluster. It ensures that all necessary configurations are created, updated, and verified. + +##### New: Function Removal - AWS + +- New: This playbook automates the removal of an AWS Lambda function and its associated resources used for managing resources within an Amazon EKS cluster. It ensures all related roles, policies, and security configurations are properly detached and deleted. diff --git a/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Enrichment.png b/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Enrichment.png index aa7663a07673..626d5c99b882 100644 Binary files a/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Enrichment.png and b/Packs/AWS-Enrichment-Remediation/doc_files/AWS_-_Enrichment.png differ diff --git a/Packs/AWS-Enrichment-Remediation/doc_files/Function_Deployment_-_AWS.png b/Packs/AWS-Enrichment-Remediation/doc_files/Function_Deployment_-_AWS.png new file mode 100644 index 000000000000..5c79a12533d1 Binary files /dev/null and b/Packs/AWS-Enrichment-Remediation/doc_files/Function_Deployment_-_AWS.png differ diff --git a/Packs/AWS-Enrichment-Remediation/doc_files/Function_Removal_-_AWS.png b/Packs/AWS-Enrichment-Remediation/doc_files/Function_Removal_-_AWS.png new file mode 100644 index 000000000000..eb224b8c8946 Binary files /dev/null and b/Packs/AWS-Enrichment-Remediation/doc_files/Function_Removal_-_AWS.png differ diff --git a/Packs/AWS-Enrichment-Remediation/pack_metadata.json b/Packs/AWS-Enrichment-Remediation/pack_metadata.json index e11e837e302a..b46cb27eb6ec 100644 --- a/Packs/AWS-Enrichment-Remediation/pack_metadata.json +++ b/Packs/AWS-Enrichment-Remediation/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS Enrichment and Remediation", "description": "Playbooks using multiple AWS content packs for enrichment and remediation purposes", "support": "xsoar", - "currentVersion": "1.1.17", + "currentVersion": "1.1.19", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", @@ -33,12 +33,17 @@ "AWS-S3": { "mandatory": true, "display_name": "AWS - S3" + }, + "AWS_SystemManager": { + "mandatory": true, + "display_name": "AWS Systems Manager" } }, "displayedImages": [ "AWS-IAM", "CommonScripts", "AWS-EC2", - "AWS-S3" + "AWS-S3", + "AWS_SystemManager" ] } \ No newline at end of file diff --git a/Packs/AWS-GuardDuty/.pack-ignore b/Packs/AWS-GuardDuty/.pack-ignore index 2448660eb5cb..459381d1be37 100644 --- a/Packs/AWS-GuardDuty/.pack-ignore +++ b/Packs/AWS-GuardDuty/.pack-ignore @@ -1,5 +1,5 @@ [file:AWSGuardDuty.yml] -ignore=IN126,BA108,BA109,IN124 +ignore=IN126,BA108,BA109 [known_words] gd diff --git a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.yml 
b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.yml index 3be723c976d4..f10f3b201a25 100644 --- a/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.yml +++ b/Packs/AWS-GuardDuty/Integrations/AWSGuardDuty/AWSGuardDuty.yml @@ -880,7 +880,7 @@ script: - contextPath: AWS.GuardDuty.Members.UpdatedAt description: The time a member was last updated. type: string - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.100468 isfetch: true runonce: false script: '-' diff --git a/Packs/AWS-GuardDuty/Integrations/AWSGuardDutyEventCollector/AWSGuardDutyEventCollector.yml b/Packs/AWS-GuardDuty/Integrations/AWSGuardDutyEventCollector/AWSGuardDutyEventCollector.yml index 44cb43122114..3312ff05ab00 100644 --- a/Packs/AWS-GuardDuty/Integrations/AWSGuardDutyEventCollector/AWSGuardDutyEventCollector.yml +++ b/Packs/AWS-GuardDuty/Integrations/AWSGuardDutyEventCollector/AWSGuardDutyEventCollector.yml @@ -121,7 +121,7 @@ script: name: limit description: Manual command used to fetch events and display them. name: aws-gd-get-events - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.98661 isfetchevents: true subtype: python3 marketplaces: diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_50.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_50.md new file mode 100644 index 000000000000..d9405367ce84 --- /dev/null +++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_50.md @@ -0,0 +1,5 @@ +#### Integrations +##### AWS - GuardDuty Event Collector +- Updated the Docker image to: *demisto/boto3py3:1.0.0.98661*. +##### AWS - GuardDuty +- Updated the Docker image to: *demisto/boto3py3:1.0.0.98661*. diff --git a/Packs/AWS-GuardDuty/ReleaseNotes/1_3_51.md b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_51.md new file mode 100644 index 000000000000..9499fa80228b --- /dev/null +++ b/Packs/AWS-GuardDuty/ReleaseNotes/1_3_51.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - GuardDuty + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. diff --git a/Packs/AWS-GuardDuty/pack_metadata.json b/Packs/AWS-GuardDuty/pack_metadata.json index c17430295aeb..efecb3ce0fff 100644 --- a/Packs/AWS-GuardDuty/pack_metadata.json +++ b/Packs/AWS-GuardDuty/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - GuardDuty", "description": "Amazon Web Services Guard Duty Service (gd)", "support": "xsoar", - "currentVersion": "1.3.49", + "currentVersion": "1.3.51", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-IAM/.pack-ignore b/Packs/AWS-IAM/.pack-ignore index c4bf677bb405..a9e3dca656b1 100644 --- a/Packs/AWS-IAM/.pack-ignore +++ b/Packs/AWS-IAM/.pack-ignore @@ -1,5 +1,5 @@ [file:AWS-IAM.yml] -ignore=BA108,BA109,IN124 +ignore=BA108,BA109 [file:README.md] ignore=RM104 diff --git a/Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.yml b/Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.yml index bd79398ffa79..c606dfa482b0 100644 --- a/Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.yml +++ b/Packs/AWS-IAM/Integrations/AWS-IAM/AWS-IAM.yml @@ -1545,7 +1545,7 @@ script: - contextPath: AWS.IAM.Roles.AttachedPolicies.Query.Marker description: When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request. 
type: string - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.100468 runonce: false script: '-' subtype: python3 diff --git a/Packs/AWS-IAM/ReleaseNotes/1_1_62.md b/Packs/AWS-IAM/ReleaseNotes/1_1_62.md new file mode 100644 index 000000000000..91a43390a263 --- /dev/null +++ b/Packs/AWS-IAM/ReleaseNotes/1_1_62.md @@ -0,0 +1,3 @@ +## AWS - IAM + +- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release. \ No newline at end of file diff --git a/Packs/AWS-IAM/ReleaseNotes/1_1_63.md b/Packs/AWS-IAM/ReleaseNotes/1_1_63.md new file mode 100644 index 000000000000..2e5386746bdc --- /dev/null +++ b/Packs/AWS-IAM/ReleaseNotes/1_1_63.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - Identity and Access Management + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. diff --git a/Packs/AWS-IAM/pack_metadata.json b/Packs/AWS-IAM/pack_metadata.json index 7ed76c68c188..6f8d4712c802 100644 --- a/Packs/AWS-IAM/pack_metadata.json +++ b/Packs/AWS-IAM/pack_metadata.json @@ -3,7 +3,7 @@ "description": "Amazon Web Services Identity and Access Management (IAM)", "support": "xsoar", "author": "Cortex XSOAR", - "currentVersion": "1.1.61", + "currentVersion": "1.1.63", "url": "https://www.paloaltonetworks.com/cortex", "email": "", "created": "2020-04-14T00:00:00Z", diff --git a/Packs/AWS-IAMIdentityCenter/Integrations/AWSIAMIdentityCenter/AWSIAMIdentityCenter.yml b/Packs/AWS-IAMIdentityCenter/Integrations/AWSIAMIdentityCenter/AWSIAMIdentityCenter.yml index b6d089713b0f..8e069ecf1623 100644 --- a/Packs/AWS-IAMIdentityCenter/Integrations/AWSIAMIdentityCenter/AWSIAMIdentityCenter.yml +++ b/Packs/AWS-IAMIdentityCenter/Integrations/AWSIAMIdentityCenter/AWSIAMIdentityCenter.yml @@ -1173,7 +1173,7 @@ script: required: true description: Updates an IAM Identity Center group for your AWS account. name: aws-iam-identitycenter-update-group - dockerimage: demisto/boto3py3:1.0.0.94100 + dockerimage: demisto/boto3py3:1.0.0.98661 runonce: false script: '' subtype: python3 diff --git a/Packs/AWS-IAMIdentityCenter/ReleaseNotes/1_0_3.md b/Packs/AWS-IAMIdentityCenter/ReleaseNotes/1_0_3.md new file mode 100644 index 000000000000..b0825d987b22 --- /dev/null +++ b/Packs/AWS-IAMIdentityCenter/ReleaseNotes/1_0_3.md @@ -0,0 +1,3 @@ +#### Integrations +##### AWS - IAM Identity Center +- Updated the Docker image to: *demisto/boto3py3:1.0.0.98661*. diff --git a/Packs/AWS-IAMIdentityCenter/pack_metadata.json b/Packs/AWS-IAMIdentityCenter/pack_metadata.json index 5b2ef2d89f4b..4e01cb2afb2b 100644 --- a/Packs/AWS-IAMIdentityCenter/pack_metadata.json +++ b/Packs/AWS-IAMIdentityCenter/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - IAM Identity Center", "description": "AWS IAM Identity Center\n\nWith AWS IAM Identity Center (successor to AWS Single Sign-On), you can manage sign-in security for your workforce identities, also known as workforce users. IAM Identity Center provides one place where you can create or connect workforce users and manage their access centrally across all their AWS accounts and applications. 
IAM Identity Center is the recommended approach for workforce authentication and authorization in AWS, for organizations of any size and type.", "support": "xsoar", - "currentVersion": "1.0.2", + "currentVersion": "1.0.3", "author": "Cortex XSOAR", "url": "", "email": "", diff --git a/Packs/AWS-ILM/.pack-ignore b/Packs/AWS-ILM/.pack-ignore index 8aabc725611f..e69de29bb2d1 100644 --- a/Packs/AWS-ILM/.pack-ignore +++ b/Packs/AWS-ILM/.pack-ignore @@ -1,3 +0,0 @@ -[file:AWSILM.yml] -ignore=IN124 - diff --git a/Packs/AWS-ILM/Integrations/AWSILM/AWSILM.yml b/Packs/AWS-ILM/Integrations/AWSILM/AWSILM.yml index 5e233ccc7c5c..cd9586f2075d 100644 --- a/Packs/AWS-ILM/Integrations/AWSILM/AWSILM.yml +++ b/Packs/AWS-ILM/Integrations/AWSILM/AWSILM.yml @@ -347,7 +347,7 @@ script: type: Unknown description: Permanently removes a group. execution: true - dockerimage: demisto/python3:3.10.13.84405 + dockerimage: demisto/python3:3.10.14.99865 runonce: false script: '-' subtype: python3 diff --git a/Packs/AWS-ILM/ReleaseNotes/1_0_25.md b/Packs/AWS-ILM/ReleaseNotes/1_0_25.md new file mode 100644 index 000000000000..b3c713c42e04 --- /dev/null +++ b/Packs/AWS-ILM/ReleaseNotes/1_0_25.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - IAM (user lifecycle management) + +- Updated the Docker image to: *demisto/python3:3.10.14.99865*. diff --git a/Packs/AWS-ILM/pack_metadata.json b/Packs/AWS-ILM/pack_metadata.json index b7cd666a49f1..1768302c7f6f 100644 --- a/Packs/AWS-ILM/pack_metadata.json +++ b/Packs/AWS-ILM/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS-ILM", "description": "IAM Integration for AWS-ILM. This pack handles user account auto-provisioning", "support": "xsoar", - "currentVersion": "1.0.24", + "currentVersion": "1.0.25", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-Lambda/.pack-ignore b/Packs/AWS-Lambda/.pack-ignore index ea57077bc718..f827794e8cd5 100644 --- a/Packs/AWS-Lambda/.pack-ignore +++ b/Packs/AWS-Lambda/.pack-ignore @@ -1,3 +1,3 @@ [file:AWS_Lambda.yml] -ignore=BA108,BA109,IN124,BA124 +ignore=BA108,BA109,BA124 diff --git a/Packs/AWS-Lambda/Integrations/AWS_Lambda/AWS_Lambda.yml b/Packs/AWS-Lambda/Integrations/AWS_Lambda/AWS_Lambda.yml index 287ddfcd467e..d86bafed21f7 100644 --- a/Packs/AWS-Lambda/Integrations/AWS_Lambda/AWS_Lambda.yml +++ b/Packs/AWS-Lambda/Integrations/AWS_Lambda/AWS_Lambda.yml @@ -1428,7 +1428,7 @@ script: - description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role. name: roleSessionDuration outputs: [] - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.100468 runonce: false subtype: python3 script: '' diff --git a/Packs/AWS-Lambda/ReleaseNotes/1_3_5.md b/Packs/AWS-Lambda/ReleaseNotes/1_3_5.md new file mode 100644 index 000000000000..1f10b3aa2bd4 --- /dev/null +++ b/Packs/AWS-Lambda/ReleaseNotes/1_3_5.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - Lambda + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. 
diff --git a/Packs/AWS-Lambda/pack_metadata.json b/Packs/AWS-Lambda/pack_metadata.json index 91e048fa8562..aa2bcf3bc828 100644 --- a/Packs/AWS-Lambda/pack_metadata.json +++ b/Packs/AWS-Lambda/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - Lambda", "description": "Amazon Web Services Serverless Compute service (lambda)", "support": "xsoar", - "currentVersion": "1.3.4", + "currentVersion": "1.3.5", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-NetworkFirewall/.pack-ignore b/Packs/AWS-NetworkFirewall/.pack-ignore index 4e77d1e376a4..93816a6c6ed7 100644 --- a/Packs/AWS-NetworkFirewall/.pack-ignore +++ b/Packs/AWS-NetworkFirewall/.pack-ignore @@ -1,3 +1,3 @@ [file:AWS-NetworkFirewall.yml] -ignore=BA108,BA109,IN124,BA124 +ignore=BA108,BA109,BA124 diff --git a/Packs/AWS-Route53/ReleaseNotes/1_1_34.md b/Packs/AWS-Route53/ReleaseNotes/1_1_34.md new file mode 100644 index 000000000000..811cd448d769 --- /dev/null +++ b/Packs/AWS-Route53/ReleaseNotes/1_1_34.md @@ -0,0 +1,3 @@ +## AWS - Route53 + +- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release. \ No newline at end of file diff --git a/Packs/AWS-Route53/pack_metadata.json b/Packs/AWS-Route53/pack_metadata.json index 13e225c7b4c1..b797842d69c8 100644 --- a/Packs/AWS-Route53/pack_metadata.json +++ b/Packs/AWS-Route53/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - Route53", "description": "Amazon Web Services Managed Cloud DNS Service.", "support": "xsoar", - "currentVersion": "1.1.33", + "currentVersion": "1.1.34", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-S3/.pack-ignore b/Packs/AWS-S3/.pack-ignore index 78c4d9fdfd12..15f99299c9b1 100644 --- a/Packs/AWS-S3/.pack-ignore +++ b/Packs/AWS-S3/.pack-ignore @@ -1,3 +1,3 @@ [file:AWS-S3.yml] -ignore=BA108,BA109,IN124 +ignore=BA108,BA109 diff --git a/Packs/AWS-S3/Integrations/AWS-S3/AWS-S3.yml b/Packs/AWS-S3/Integrations/AWS-S3/AWS-S3.yml index a777fc517bf2..a93f914f7d0d 100644 --- a/Packs/AWS-S3/Integrations/AWS-S3/AWS-S3.yml +++ b/Packs/AWS-S3/Integrations/AWS-S3/AWS-S3.yml @@ -397,7 +397,7 @@ script: required: true description: Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. name: aws-s3-put-public-access-block - dockerimage: demisto/boto3py3:1.0.0.87582 + dockerimage: demisto/boto3py3:1.0.0.100496 runonce: false script: '' type: python diff --git a/Packs/AWS-S3/ReleaseNotes/1_2_24.md b/Packs/AWS-S3/ReleaseNotes/1_2_24.md new file mode 100644 index 000000000000..e933a273ba4d --- /dev/null +++ b/Packs/AWS-S3/ReleaseNotes/1_2_24.md @@ -0,0 +1,3 @@ +## AWS - S3 + +- Locked dependencies of the pack to ensure stability for versioned core packs. No changes in this release. \ No newline at end of file diff --git a/Packs/AWS-S3/ReleaseNotes/1_2_25.md b/Packs/AWS-S3/ReleaseNotes/1_2_25.md new file mode 100644 index 000000000000..ce1bca76b300 --- /dev/null +++ b/Packs/AWS-S3/ReleaseNotes/1_2_25.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - S3 + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100496*. 
diff --git a/Packs/AWS-S3/pack_metadata.json b/Packs/AWS-S3/pack_metadata.json index cc2db1a8eb35..5cdecad892d1 100644 --- a/Packs/AWS-S3/pack_metadata.json +++ b/Packs/AWS-S3/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - S3", "description": "Amazon Web Services Simple Storage Service (S3)", "support": "xsoar", - "currentVersion": "1.2.23", + "currentVersion": "1.2.25", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-SNS/Integrations/AWSSNS/AWSSNS.py b/Packs/AWS-SNS/Integrations/AWSSNS/AWSSNS.py index bcc14b66d59f..f7d33ba6ad7b 100644 --- a/Packs/AWS-SNS/Integrations/AWSSNS/AWSSNS.py +++ b/Packs/AWS-SNS/Integrations/AWSSNS/AWSSNS.py @@ -180,8 +180,8 @@ def main(): # pragma no cover aws_role_session_name = params.get('roleSessionName') aws_role_session_duration = params.get('sessionDuration') aws_role_policy = None - aws_access_key_id = params.get('access_key') - aws_secret_access_key = params.get('secret_key') + aws_access_key_id = params.get('credentials', {}).get('identifier', '') + aws_secret_access_key = params.get('credentials', {}).get('password', '') verify_certificate = not params.get('insecure', False) timeout = params.get('timeout') retries = params.get('retries') or 5 diff --git a/Packs/AWS-SNS/Integrations/AWSSNS/AWSSNS.yml b/Packs/AWS-SNS/Integrations/AWSSNS/AWSSNS.yml index 31adfaf2f9b5..a7693c0d3087 100644 --- a/Packs/AWS-SNS/Integrations/AWSSNS/AWSSNS.yml +++ b/Packs/AWS-SNS/Integrations/AWSSNS/AWSSNS.yml @@ -17,37 +17,34 @@ configuration: type: 0 required: false - display: Access Key - name: access_key - required: true - type: 0 -- display: Secret Key - name: secret_key + name: credentials required: true type: 9 + displaypassword: Secret Key - display: Role Session Duration name: sessionDuration - type: 0 required: false -- additionalinfo: The time in seconds till a timeout exception is reached. You can specify just the read timeout (for example 60) or also the connect timeout followed after a comma (for example 60,10). If a connect timeout is not specified, a default of 10 second will be used. - display: Timeout + type: 0 +- display: Timeout name: timeout type: 0 required: false + additionalinfo: The time in seconds till a timeout exception is reached. You can specify just the read timeout (for example 60) or also the connect timeout followed after a comma (for example 60,10). If a connect timeout is not specified, a default of 10 second will be used. - additionalinfo: 'The maximum number of retry attempts when connection or throttling errors are encountered. Set to 0 to disable retries. The default value is 5 and the limit is 10. Note: Increasing the number of retries will increase the execution time.' - defaultvalue: '5' display: Retries name: retries type: 0 required: false -- display: AWS STS Regional Endpoints - additionalinfo: Sets the AWS_STS_REGIONAL_ENDPOINTS environment variable to specify the AWS STS endpoint resolution logic. By default, this option is set to “legacy” in AWS. Leave empty if the environment variable is already set using server configuration. + defaultvalue: '5' +- additionalinfo: 'Sets the AWS_STS_REGIONAL_ENDPOINTS environment variable to specify the AWS STS endpoint resolution logic. By default, this option is set to “legacy” in AWS. Leave empty if the environment variable is already set using server configuration.' 
+ display: AWS STS Regional Endpoints name: sts_regional_endpoint + type: 15 + required: false options: - legacy - regional - type: 15 section: Connect - required: false - display: Use system proxy settings name: proxy type: 8 @@ -234,7 +231,7 @@ script: outputs: - contextPath: AWS.SNS.Subscriptions.SubscriptionArn description: The Subscription Arn. - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.98039 script: '' subtype: python3 type: python diff --git a/Packs/AWS-SNS/Integrations/AWSSNS/README.md b/Packs/AWS-SNS/Integrations/AWSSNS/README.md index ef6e73f4619d..7946d5ccbeb6 100644 --- a/Packs/AWS-SNS/Integrations/AWSSNS/README.md +++ b/Packs/AWS-SNS/Integrations/AWSSNS/README.md @@ -12,7 +12,7 @@ For detailed instructions about setting up authentication, see: [AWS Integration | **Parameter** | **Description** | **Required** | | --- | --- | --- | - | AWS Default Region | | False | + | AWS Default Region | | True | | Role Arn | When using Access Key and Secret Key, there is no need to use Role Arn | False | | Role Session Name | | False | | Access Key | | True | @@ -20,10 +20,12 @@ For detailed instructions about setting up authentication, see: [AWS Integration | Role Session Duration | | False | | Timeout | The time in seconds till a timeout exception is reached. You can specify just the read timeout \(for example 60\) or also the connect timeout followed after a comma \(for example 60,10\). If a connect timeout is not specified, a default of 10 second will be used. | False | | Retries | The maximum number of retry attempts when connection or throttling errors are encountered. Set to 0 to disable retries. The default value is 5 and the limit is 10. Note: Increasing the number of retries will increase the execution time. | False | + | AWS STS Regional Endpoints | Sets the AWS_STS_REGIONAL_ENDPOINTS environment variable to specify the AWS STS endpoint resolution logic. By default, this option is set to “legacy” in AWS. Leave empty if the environment variable is already set using server configuration. | False | | Use system proxy settings | | False | | Trust any certificate (not secure) | | False | 4. Click **Test** to validate the URLs, token, and connection. + ## Commands You can execute these commands from the Cortex XSOAR CLI, as part of an automation, or in a playbook. After you successfully execute a command, a DBot message appears in the War Room with the command details. @@ -242,4 +244,3 @@ Returns a list of the subscriptions to a specific topic. Each call returns a lim #### Human Readable Output - diff --git a/Packs/AWS-SNS/ReleaseNotes/1_0_16.md b/Packs/AWS-SNS/ReleaseNotes/1_0_16.md new file mode 100644 index 000000000000..ebaba0c114ac --- /dev/null +++ b/Packs/AWS-SNS/ReleaseNotes/1_0_16.md @@ -0,0 +1,7 @@ + +#### Integrations + +##### AWS - SNS + +- Improved implementation by removing a redundant *password* configuration field. +- Updated the Docker image to: *demisto/boto3py3:1.0.0.98039*. 
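For context on the AWSSNS.py and AWSSNS.yml change above, which replaces the separate access_key/secret_key fields with a single type 9 (Authentication) parameter: the sketch below shows how that merged parameter shape is read. The helper name is ours for illustration and is not part of the integration.

```python
import demistomock as demisto  # noqa: F401


def aws_keys_from_params(params: dict) -> tuple:
    """Read the merged 'credentials' parameter.

    A type 9 parameter stores the value displayed as "Access Key" under
    'identifier' and the "Secret Key" under 'password'.
    """
    credentials = params.get("credentials") or {}
    return credentials.get("identifier", ""), credentials.get("password", "")


# Mirrors the updated assignments in main() of AWSSNS.py above.
aws_access_key_id, aws_secret_access_key = aws_keys_from_params(demisto.params())
```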
diff --git a/Packs/AWS-SNS/pack_metadata.json b/Packs/AWS-SNS/pack_metadata.json index a3b20560771a..6528d4fa7e83 100644 --- a/Packs/AWS-SNS/pack_metadata.json +++ b/Packs/AWS-SNS/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - SNS", "description": "This is the integration content pack which can create or delete topic/subscription on AWS Simple Notification System and send the message via SNS as well.", "support": "xsoar", - "currentVersion": "1.0.15", + "currentVersion": "1.0.16", "author": "Jie Liau", "url": "", "email": "", diff --git a/Packs/AWS-SQS/.pack-ignore b/Packs/AWS-SQS/.pack-ignore index 3bb534335e6c..f3c0982990ff 100644 --- a/Packs/AWS-SQS/.pack-ignore +++ b/Packs/AWS-SQS/.pack-ignore @@ -2,7 +2,7 @@ ignore=IN126 [file:AWS-SQS.yml] -ignore=IN126,BA108,BA109,IN124 +ignore=IN126,BA108,BA109 [known_words] abuseipdb diff --git a/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.yml b/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.yml index 1309e9dd1ee8..61e17d3478ee 100644 --- a/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.yml +++ b/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS.yml @@ -275,7 +275,7 @@ script: - name: roleSessionDuration description: The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) up to the maximum session duration setting for the role. description: Deletes the messages in a queue specified by the QueueURL parameter. - dockerimage: demisto/boto3py3:1.0.0.87582 + dockerimage: demisto/boto3py3:1.0.0.100468 isfetch: true tests: - AWS - SQS Test Playbook diff --git a/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS_test.py b/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS_test.py index d460d3809c8b..9371e7816aef 100644 --- a/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS_test.py +++ b/Packs/AWS-SQS/Integrations/AWS-SQS/AWS-SQS_test.py @@ -1,5 +1,4 @@ import pytest -import demistomock as demisto from AWSApiModule import AWSClient aws_sqs = __import__('AWS-SQS') @@ -40,12 +39,12 @@ def delete_message(): @pytest.mark.parametrize('lastReceiptHandles, messages, args, expected', MOCK_FETCH_INCIDENTS) def test_fetch_incidents(mocker, lastReceiptHandles, messages, args, expected): - mocker.patch('AWS-SQS.demisto.getLastRun', return_value=lastReceiptHandles) - mocker.patch('AWS-SQS.demisto.setLastRun', return_value='test') + mocker.patch.object(aws_sqs.demisto, 'getLastRun', return_value=lastReceiptHandles) + mocker.patch.object(aws_sqs.demisto, 'setLastRun', return_value='test') client = mocker.patch.object(AWSClient, 'aws_session', return_value=mock_class()) mocker.patch.object(client.return_value, 'receive_message', side_effect=messages) mocker.patch.object(client.return_value, 'delete_message', return_value='test') - mocker.patch('AWS-SQS.parse_incident_from_finding', return_value='test') - incidents_mocker = mocker.patch.object(demisto, 'incidents') + mocker.patch.object(aws_sqs, 'parse_incident_from_finding', return_value='test') + incidents_mocker = mocker.patch.object(aws_sqs.demisto, 'incidents') aws_sqs.fetch_incidents(**args) assert len(incidents_mocker.call_args[0][0]) == expected diff --git a/Packs/AWS-SQS/ReleaseNotes/1_2_26.md b/Packs/AWS-SQS/ReleaseNotes/1_2_26.md new file mode 100644 index 000000000000..a6d6f1b6f71c --- /dev/null +++ b/Packs/AWS-SQS/ReleaseNotes/1_2_26.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - SQS + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. 
diff --git a/Packs/AWS-SQS/pack_metadata.json b/Packs/AWS-SQS/pack_metadata.json index 9152dc7eb4e1..cbfa848729d9 100644 --- a/Packs/AWS-SQS/pack_metadata.json +++ b/Packs/AWS-SQS/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - SQS", "description": "Amazon Web Services Simple Queuing Service (SQS)", "support": "xsoar", - "currentVersion": "1.2.25", + "currentVersion": "1.2.26", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-SecurityHub/.pack-ignore b/Packs/AWS-SecurityHub/.pack-ignore index 53fd637ac11c..c35c1dbffc6b 100644 --- a/Packs/AWS-SecurityHub/.pack-ignore +++ b/Packs/AWS-SecurityHub/.pack-ignore @@ -1,5 +1,5 @@ [file:AWS_SecurityHub.yml] -ignore=IN126,IN136,BA108,BA109,IN124 +ignore=IN126,IN136,BA108,BA109 [known_words] datetime diff --git a/Packs/AWS-SecurityHub/Integrations/AWSSecurityHubEventCollector/AWSSecurityHubEventCollector.yml b/Packs/AWS-SecurityHub/Integrations/AWSSecurityHubEventCollector/AWSSecurityHubEventCollector.yml index 543a73444360..6c8778e0452b 100644 --- a/Packs/AWS-SecurityHub/Integrations/AWSSecurityHubEventCollector/AWSSecurityHubEventCollector.yml +++ b/Packs/AWS-SecurityHub/Integrations/AWSSecurityHubEventCollector/AWSSecurityHubEventCollector.yml @@ -125,7 +125,7 @@ script: name: limit description: Fetch events from AWS Security Hub. name: aws-securityhub-get-events - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.98661 isfetchevents: true script: '-' subtype: python3 diff --git a/Packs/AWS-SecurityHub/Integrations/AWS_SecurityHub/AWS_SecurityHub.yml b/Packs/AWS-SecurityHub/Integrations/AWS_SecurityHub/AWS_SecurityHub.yml index 172a78ec4fa9..68aa7cf862a1 100644 --- a/Packs/AWS-SecurityHub/Integrations/AWS_SecurityHub/AWS_SecurityHub.yml +++ b/Packs/AWS-SecurityHub/Integrations/AWS_SecurityHub/AWS_SecurityHub.yml @@ -2077,7 +2077,7 @@ script: description: The UTC timestamp in seconds since the last update. The incident is only updated if it was modified after the last update time. - name: get-mapping-fields description: Returns the list of fields to map in outgoing mirroring. This command is only used for debugging purposes. - dockerimage: demisto/boto3py3:1.0.0.89611 + dockerimage: demisto/boto3py3:1.0.0.100468 isfetch: true ismappable: true isremotesyncin: true diff --git a/Packs/AWS-SecurityHub/ReleaseNotes/1_3_33.md b/Packs/AWS-SecurityHub/ReleaseNotes/1_3_33.md new file mode 100644 index 000000000000..fbef5b391494 --- /dev/null +++ b/Packs/AWS-SecurityHub/ReleaseNotes/1_3_33.md @@ -0,0 +1,3 @@ +#### Integrations +##### AWS Security Hub Event Collector +- Updated the Docker image to: *demisto/boto3py3:1.0.0.98661*. diff --git a/Packs/AWS-SecurityHub/ReleaseNotes/1_3_34.md b/Packs/AWS-SecurityHub/ReleaseNotes/1_3_34.md new file mode 100644 index 000000000000..c363a6ab88e7 --- /dev/null +++ b/Packs/AWS-SecurityHub/ReleaseNotes/1_3_34.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS - Security Hub + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. 
diff --git a/Packs/AWS-SecurityHub/pack_metadata.json b/Packs/AWS-SecurityHub/pack_metadata.json index 2bd7bc38249c..5a367e3bed1b 100644 --- a/Packs/AWS-SecurityHub/pack_metadata.json +++ b/Packs/AWS-SecurityHub/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS - Security Hub", "description": "Amazon Web Services Security Hub Service.", "support": "xsoar", - "currentVersion": "1.3.32", + "currentVersion": "1.3.34", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS-SecurityLake/Integrations/AWSSecurityLake/AWSSecurityLake.yml b/Packs/AWS-SecurityLake/Integrations/AWSSecurityLake/AWSSecurityLake.yml index 56652f892cb8..cb0e59d50145 100644 --- a/Packs/AWS-SecurityLake/Integrations/AWSSecurityLake/AWSSecurityLake.yml +++ b/Packs/AWS-SecurityLake/Integrations/AWSSecurityLake/AWSSecurityLake.yml @@ -1211,7 +1211,7 @@ script: script: "-" type: python subtype: python3 - dockerimage: demisto/boto3py3:1.0.0.91694 + dockerimage: demisto/boto3py3:1.0.0.98661 feed: false isfetch: false fromversion: 6.10.0 diff --git a/Packs/AWS-SecurityLake/ReleaseNotes/1_0_9.md b/Packs/AWS-SecurityLake/ReleaseNotes/1_0_9.md new file mode 100644 index 000000000000..50ca5a557cb0 --- /dev/null +++ b/Packs/AWS-SecurityLake/ReleaseNotes/1_0_9.md @@ -0,0 +1,3 @@ +#### Integrations +##### Amazon Security Lake +- Updated the Docker image to: *demisto/boto3py3:1.0.0.98661*. diff --git a/Packs/AWS-SecurityLake/pack_metadata.json b/Packs/AWS-SecurityLake/pack_metadata.json index c13c9de22077..79b4148875ed 100644 --- a/Packs/AWS-SecurityLake/pack_metadata.json +++ b/Packs/AWS-SecurityLake/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Amazon - Security Lake", "description": "Amazon Security Lake is a fully managed security data lake service.", "support": "xsoar", - "currentVersion": "1.0.8", + "currentVersion": "1.0.9", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS_DynamoDB/.pack-ignore b/Packs/AWS_DynamoDB/.pack-ignore index f5519e0fb0f6..7c8bec102977 100644 --- a/Packs/AWS_DynamoDB/.pack-ignore +++ b/Packs/AWS_DynamoDB/.pack-ignore @@ -1,5 +1,5 @@ [file:AWS_DynamoDB.yml] -ignore=BA108,BA109,IN124 +ignore=BA108,BA109 [file:README.md] ignore=RM106 diff --git a/Packs/AWS_DynamoDB/Integrations/AWS_DynamoDB/AWS_DynamoDB.yml b/Packs/AWS_DynamoDB/Integrations/AWS_DynamoDB/AWS_DynamoDB.yml index 60993ead1004..14ae5447670a 100644 --- a/Packs/AWS_DynamoDB/Integrations/AWS_DynamoDB/AWS_DynamoDB.yml +++ b/Packs/AWS_DynamoDB/Integrations/AWS_DynamoDB/AWS_DynamoDB.yml @@ -2998,7 +2998,7 @@ script: description: The name of the TTL attribute used to store the expiration time for items in the table. - contextPath: AWS-DynamoDB.TimeToLiveSpecification description: Represents the output of an UpdateTimeToLive operation. - dockerimage: demisto/boto3py3:1.0.0.87655 + dockerimage: demisto/boto3py3:1.0.0.100468 script: '' subtype: python3 type: python diff --git a/Packs/AWS_DynamoDB/ReleaseNotes/1_0_34.md b/Packs/AWS_DynamoDB/ReleaseNotes/1_0_34.md new file mode 100644 index 000000000000..785cbb4788f0 --- /dev/null +++ b/Packs/AWS_DynamoDB/ReleaseNotes/1_0_34.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### Amazon DynamoDB + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100468*. 
diff --git a/Packs/AWS_DynamoDB/pack_metadata.json b/Packs/AWS_DynamoDB/pack_metadata.json index 7789efae3cbc..b5cd3a4855c0 100644 --- a/Packs/AWS_DynamoDB/pack_metadata.json +++ b/Packs/AWS_DynamoDB/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Amazon DynamoDB", "description": "Amazon DynamoDB Amazon DynamoDB is a fully managed NoSQL database service that provides fast and predictable performance with seamless scalability. DynamoDB lets you offload the administrative burdens of operating and scaling a distributed database, so that you don't have to worry about hardware provisioning, setup and configuration, replication, software patching, or cluster scaling. With DynamoDB, you can create database tables that can store and retrieve any amount of data, and serve any level of request traffic. You can scale up or scale down your tables' throughput capacity without downtime or performance degradation, and use the AWS Management Console to monitor resource utilization and performance metrics. DynamoDB automatically spreads the data and traffic for your tables over a sufficient number of servers to handle your throughput and storage requirements, while maintaining consistent and fast performance. All of your data is stored on solid state disks (SSDs) and automatically replicated across multiple Availability Zones in an AWS region, providing built-in high availability and data durability. ", "support": "xsoar", - "currentVersion": "1.0.33", + "currentVersion": "1.0.34", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS_Sagemaker/.pack-ignore b/Packs/AWS_Sagemaker/.pack-ignore index a5768b144c65..ed3f7d2b935a 100644 --- a/Packs/AWS_Sagemaker/.pack-ignore +++ b/Packs/AWS_Sagemaker/.pack-ignore @@ -1,5 +1,5 @@ [file:AWSSagemaker.yml] -ignore=IN124,BA124 +ignore=BA124 [known_words] sagemaker diff --git a/Packs/AWS_Sagemaker/Integrations/AWSSagemaker/AWSSagemaker.yml b/Packs/AWS_Sagemaker/Integrations/AWSSagemaker/AWSSagemaker.yml index 29d39bd7db4b..ae8120bcc559 100644 --- a/Packs/AWS_Sagemaker/Integrations/AWSSagemaker/AWSSagemaker.yml +++ b/Packs/AWS_Sagemaker/Integrations/AWSSagemaker/AWSSagemaker.yml @@ -63,7 +63,7 @@ script: description: The predication probability (range 0-1). type: number description: Classify input text (usually email content). - dockerimage: demisto/boto3py3:1.0.0.89670 + dockerimage: demisto/boto3py3:1.0.0.100496 tests: - Test Sagemaker fromversion: 5.0.0 diff --git a/Packs/AWS_Sagemaker/ReleaseNotes/1_1_6.md b/Packs/AWS_Sagemaker/ReleaseNotes/1_1_6.md new file mode 100644 index 000000000000..70b3a1255f63 --- /dev/null +++ b/Packs/AWS_Sagemaker/ReleaseNotes/1_1_6.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS Sagemaker + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100496*. 
diff --git a/Packs/AWS_Sagemaker/pack_metadata.json b/Packs/AWS_Sagemaker/pack_metadata.json index 46347dfe780d..8efd1c0c7957 100644 --- a/Packs/AWS_Sagemaker/pack_metadata.json +++ b/Packs/AWS_Sagemaker/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS Sagemaker", "description": "AWS Sagemaker - Demisto Phishing Email Classifier", "support": "xsoar", - "currentVersion": "1.1.5", + "currentVersion": "1.1.6", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AWS_SystemManager/.pack-ignore b/Packs/AWS_SystemManager/.pack-ignore index e69de29bb2d1..52a08804c45f 100644 --- a/Packs/AWS_SystemManager/.pack-ignore +++ b/Packs/AWS_SystemManager/.pack-ignore @@ -0,0 +1,2 @@ +[file:README.md] +ignore=RM104 \ No newline at end of file diff --git a/Packs/AWS_SystemManager/Integrations/AWSSystemManager/AWSSystemManager.yml b/Packs/AWS_SystemManager/Integrations/AWSSystemManager/AWSSystemManager.yml index a26e9074a018..002a025809ba 100644 --- a/Packs/AWS_SystemManager/Integrations/AWSSystemManager/AWSSystemManager.yml +++ b/Packs/AWS_SystemManager/Integrations/AWSSystemManager/AWSSystemManager.yml @@ -2385,10 +2385,11 @@ script: script: '-' type: python subtype: python3 - dockerimage: demisto/boto3py3:1.0.0.89670 + dockerimage: demisto/boto3py3:1.0.0.100294 fromversion: 6.9.0 marketplaces: - xsoar - marketplacev2 +- xpanse tests: - No tests (auto formatted) diff --git a/Packs/AWS_SystemManager/Playbooks/AWS_-_Package_Upgrade.yml b/Packs/AWS_SystemManager/Playbooks/AWS_-_Package_Upgrade.yml new file mode 100644 index 000000000000..c6895c6f03cd --- /dev/null +++ b/Packs/AWS_SystemManager/Playbooks/AWS_-_Package_Upgrade.yml @@ -0,0 +1,549 @@ +description: This playbook upgrades supported packages on an AWS EC2 instance using AWS Systems manager. +id: AWS - Package Upgrade +inputSections: +- description: Generic group for inputs. + inputs: + - ASM Rule ID + - Instance ID + - Region + - Assume Role + - Account ID + name: General (Inputs group) +inputs: +- description: ASM rule ID. + key: ASM Rule ID + playbookInputQuery: + required: true + value: + simple: ${alert.asmattacksurfaceruleid} +- description: Instance ID of the EC2. + key: Instance ID + playbookInputQuery: + required: true + value: {} +- description: AWS region of the EC2 instance. + key: Region + playbookInputQuery: + required: true + value: {} +- description: AWS role to be assumed. + key: Assume Role + playbookInputQuery: + required: false + value: {} +- description: AWS account ID. + key: Account ID + playbookInputQuery: + required: false + value: {} +name: AWS - Package Upgrade +outputSections: +- description: Generic group for outputs. + name: General (Outputs group) + outputs: + - remediatedFlag +outputs: +- contextPath: remediatedFlag + description: Whether package is upgraded. 
+starttaskid: "0" +tasks: + "0": + continueonerrortype: "" + id: "0" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#none#': + - "2" + note: false + quietmode: 0 + separatecontext: false + skipunavailable: false + task: + brand: "" + id: 6ff3e273-643f-4981-8d73-7131697d49d6 + iscommand: false + name: "" + version: -1 + description: '' + taskid: 6ff3e273-643f-4981-8d73-7131697d49d6 + timertriggers: [] + type: start + view: |- + { + "position": { + "x": 450, + "y": 50 + } + } + "2": + conditions: + - condition: + - - left: + iscontext: true + value: + simple: inputs.ASM Rule ID + operator: isExists + right: + value: {} + - - left: + iscontext: true + value: + complex: + filters: + - - left: + iscontext: true + value: + simple: modules.brand + operator: isEqualString + right: + value: + simple: AWS - System Manager + - - left: + iscontext: true + value: + simple: modules.state + operator: isEqualString + right: + value: + simple: active + root: modules + operator: isExists + - - left: + iscontext: true + value: + simple: inputs.Instance ID + operator: isExists + - - left: + iscontext: true + value: + simple: inputs.Region + operator: isExists + label: "yes" + continueonerrortype: "" + id: "2" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#default#': + - "10" + "yes": + - "7" + note: false + quietmode: 0 + separatecontext: false + skipunavailable: false + task: + brand: "" + description: Determines if the AWS - Systems Manager integration instance is configured and input values are defined. + id: c9829402-26c5-4efb-8e0f-077ce690d1a4 + iscommand: false + name: Is AWS - Systems Manager enabled and are input values defined? + type: condition + version: -1 + taskid: c9829402-26c5-4efb-8e0f-077ce690d1a4 + timertriggers: [] + type: condition + view: |- + { + "position": { + "x": 450, + "y": 250 + } + } + "3": + continueonerrortype: "" + id: "3" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + note: false + quietmode: 0 + separatecontext: false + skipunavailable: false + task: + brand: "" + id: 379598ad-1166-488f-8856-b513aaca3431 + iscommand: false + name: Done + type: title + version: -1 + description: '' + taskid: 379598ad-1166-488f-8856-b513aaca3431 + timertriggers: [] + type: title + view: |- + { + "position": { + "x": -120, + "y": 2200 + } + } + "5": + continueonerrortype: "" + id: "5" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + loop: + exitCondition: "" + iscommand: false + max: 100 + wait: 1 + nexttasks: + '#none#': + - "6" + note: false + quietmode: 0 + scriptarguments: + Ids: + simple: ${awspackageupgrade.run_command_id} + Interval: + simple: "1" + PollingCommandArgName: + simple: command_id + PollingCommandName: + simple: aws-ssm-command-list + Timeout: + simple: "30" + dt: + simple: AWS.SSM.Command(val.Status !== 'Success').CommandId + separatecontext: true + skipunavailable: false + task: + brand: "" + description: |- + Use this playbook as a sub-playbook to block execution of the master playbook until a remote action is complete. + This playbook implements polling by continuously running the command in Step \#2 until the operation completes. + The remote action should have the following structure: + + 1. Initiate the operation. + 2. Poll to check if the operation completed. + 3. (optional) Get the results of the operation. 
+ id: 8b226c89-b40d-424e-8d9f-950fa69c0ee1 + iscommand: false + name: GenericPolling + playbookId: GenericPolling + type: playbook + version: -1 + taskid: 8b226c89-b40d-424e-8d9f-950fa69c0ee1 + timertriggers: [] + type: playbook + view: |- + { + "position": { + "x": 450, + "y": 1290 + } + } + "6": + continueonerrortype: "" + id: "6" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#none#': + - "11" + note: false + quietmode: 0 + scriptarguments: + command_id: + simple: ${awspackageupgrade.run_command_id} + roleArn: + simple: ${inputs.Assume Role} + roleSessionName: + simple: AWS-SSM-Command + separatecontext: false + skipunavailable: false + task: + brand: AWS - System Manager + description: Lists the commands requested by users of the Amazon Web Services account. + id: c45424b5-c696-4708-8f17-e906c08d0296 + iscommand: true + name: Get status of command + script: AWS - System Manager|||aws-ssm-command-list + type: regular + version: -1 + taskid: c45424b5-c696-4708-8f17-e906c08d0296 + timertriggers: [] + type: regular + view: |- + { + "position": { + "x": 450, + "y": 1480 + } + } + "7": + conditions: + - condition: + - - left: + iscontext: true + value: + simple: inputs.ASM Rule ID + operator: inList + right: + value: + simple: InsecureOpenSSH + label: "yes" + continueonerrortype: "" + id: "7" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#default#': + - "10" + "yes": + - "12" + note: false + quietmode: 0 + separatecontext: false + skipunavailable: false + task: + brand: "" + description: Does the provided ASM Rule ID exist in the list? + id: a9465063-4c61-481c-8c57-242007a24466 + iscommand: false + name: Does this ASM Rule ID support AWS EC2 Patching? + type: condition + version: -1 + taskid: a9465063-4c61-481c-8c57-242007a24466 + timertriggers: [] + type: condition + view: |- + { + "position": { + "x": 450, + "y": 510 + } + } + "8": + conditions: + - condition: + - - left: + iscontext: true + value: + simple: ${awspackageupgrade.run_command_flag} + operator: isTrue + right: + value: {} + label: "yes" + continueonerrortype: "" + id: "8" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#default#': + - "10" + "yes": + - "5" + note: false + quietmode: 0 + separatecontext: false + skipunavailable: false + task: + brand: "" + description: Checks if the package upgrade command run is initiated via AWS SSM. + id: 802dfb1f-c0fa-4ad4-88f2-f36c3d480814 + iscommand: false + name: AWS SSM Command Run Initiated? + type: condition + version: -1 + taskid: 802dfb1f-c0fa-4ad4-88f2-f36c3d480814 + timertriggers: [] + type: condition + view: |- + { + "position": { + "x": 450, + "y": 1010 + } + } + "9": + continueonerrortype: "" + id: "9" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#none#': + - "3" + note: false + quietmode: 0 + scriptarguments: + key: + simple: remediatedFlag + stringify: + simple: "false" + value: + simple: "true" + separatecontext: false + skipunavailable: false + task: + brand: "" + description: Set a value in context under the key you entered. 
+ id: 8664fefc-0f3c-482c-8a10-784b03e9eeb3 + iscommand: false + name: Set remediatedFlag to true + script: Set + type: regular + version: -1 + taskid: 8664fefc-0f3c-482c-8a10-784b03e9eeb3 + timertriggers: [] + type: regular + view: |- + { + "position": { + "x": 450, + "y": 1970 + } + } + "10": + continueonerrortype: "" + id: "10" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#none#': + - "3" + note: false + quietmode: 0 + scriptarguments: + key: + simple: remediatedFlag + stringify: + simple: "false" + value: + simple: "false" + separatecontext: false + skipunavailable: false + task: + brand: "" + description: Set a value in context under the key you entered. + id: 24eb5eec-b581-45f8-8de7-841612349784 + iscommand: false + name: Set remediatedFlag to false + script: Set + type: regular + version: -1 + taskid: 24eb5eec-b581-45f8-8de7-841612349784 + timertriggers: [] + type: regular + view: |- + { + "position": { + "x": -120, + "y": 1970 + } + } + "11": + conditions: + - condition: + - - left: + iscontext: true + value: + simple: AWS.SSM.Command.Status + operator: isEqualString + right: + value: + simple: Success + label: "yes" + continueonerrortype: "" + id: "11" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#default#': + - "10" + "yes": + - "9" + note: false + quietmode: 0 + separatecontext: false + skipunavailable: false + task: + brand: "" + description: Was the command run successful? + id: b43b0cce-00a6-461d-8d6b-96c5d467c1af + iscommand: false + name: AWS Package Upgrade success? + type: condition + version: -1 + taskid: b43b0cce-00a6-461d-8d6b-96c5d467c1af + timertriggers: [] + type: condition + view: |- + { + "position": { + "x": 450, + "y": 1670 + } + } + "12": + continueonerrortype: "" + id: "12" + ignoreworker: false + isautoswitchedtoquietmode: false + isoversize: false + nexttasks: + '#none#': + - "8" + note: false + quietmode: 0 + scriptarguments: + account_id: + simple: ${inputs.Account ID} + asm_rule_id: + simple: ${inputs.ASM Rule ID} + assume_role: + simple: ${inputs.Assume Role} + instance_id: + simple: ${inputs.Instance ID} + region: + simple: ${inputs.Region} + version: + simple: ${inputs.Version} + separatecontext: false + skipunavailable: false + task: + brand: "" + description: This is an AWS script that upgrades a package on the AWS EC2 instance using AWS Systems manager. + id: 5f029ce5-bc0d-4555-800c-47be2c8ea935 + iscommand: false + name: 'AWS Package Upgrade ' + script: AWSPackageUpgrade + type: regular + version: -1 + taskid: 5f029ce5-bc0d-4555-800c-47be2c8ea935 + timertriggers: [] + type: regular + view: |- + { + "position": { + "x": 450, + "y": 800 + } + } +version: -1 +view: |- + { + "linkLabelsPosition": {}, + "paper": { + "dimensions": { + "height": 2215, + "width": 950, + "x": -120, + "y": 50 + } + } + } +tests: +- No tests (auto formatted) +fromversion: 6.10.0 diff --git a/Packs/AWS_SystemManager/Playbooks/AWS_-_Package_Upgrade_README.md b/Packs/AWS_SystemManager/Playbooks/AWS_-_Package_Upgrade_README.md new file mode 100644 index 000000000000..1e77faf209d9 --- /dev/null +++ b/Packs/AWS_SystemManager/Playbooks/AWS_-_Package_Upgrade_README.md @@ -0,0 +1,48 @@ +This playbook upgrades supported packages on an AWS EC2 instance using AWS Systems manager. + +## Dependencies + +This playbook uses the following sub-playbooks, integrations, and scripts. 
+ +### Sub-playbooks + +* GenericPolling + +### Integrations + +* AWS - System Manager + +### Scripts + +* AWSPackageUpgrade +* Set + +### Commands + +* aws-ssm-command-list + +## Playbook Inputs + +--- + +| **Name** | **Description** | **Default Value** | **Required** | +| --- | --- | --- | --- | +| ASM Rule ID | ASM rule ID. | ${alert.asmattacksurfaceruleid} | Required | +| Instance ID | Instance ID of the EC2. | | Required | +| Region | AWS region of the EC2 instance. | | Required | +| Assume Role | AWS role to be assumed. | | Optional | +| Account ID | AWS account ID. | | Optional | + +## Playbook Outputs + +--- + +| **Path** | **Description** | **Type** | +| --- | --- | --- | +| remediatedFlag | Whether package is upgraded. | unknown | + +## Playbook Image + +--- + +![AWS - Package Upgrade](../doc_files/AWS_-_Package_Upgrade.png) diff --git a/Packs/AWS_SystemManager/ReleaseNotes/1_0_2.md b/Packs/AWS_SystemManager/ReleaseNotes/1_0_2.md new file mode 100644 index 000000000000..85ba1b493ee7 --- /dev/null +++ b/Packs/AWS_SystemManager/ReleaseNotes/1_0_2.md @@ -0,0 +1,19 @@ +## AWS Systems Manager + +#### Playbooks + +##### New: AWS - Package Upgrade + +New: This playbook upgrades supported package on an AWS EC2 instance using AWS Systems manager. + +#### Scripts + +##### New: AWSPackageUpgrade + +New: This is an AWS script that upgrades a package on the AWS EC2 instance using AWS Systems manager. + +#### Integrations + +##### AWS - System Manager + +- Updated the Docker image to: *demisto/boto3py3:1.0.0.100294*. diff --git a/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/AWSPackageUpgrade.py b/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/AWSPackageUpgrade.py new file mode 100644 index 000000000000..07b90352e261 --- /dev/null +++ b/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/AWSPackageUpgrade.py @@ -0,0 +1,203 @@ +import demistomock as demisto # noqa: F401 +from CommonServerPython import * # noqa: F401 + + +from typing import Any, Dict +import traceback +import json + + +ROLE_SESSION_NAME = "xsoar-session" + + +def upgrade_package_on_instance( + instance_id: str, + asm_rule_id: str, + region: str, + assume_role_arn: str +) -> dict: + """ + Upgrade a specified package on an AWS EC2 instance using AWS SSM. + + Args: + instance_id (str): The ID of the instance where the package will be upgraded. + asm_rule_id (str): The ID of the ASM rule that specifies the package to be upgraded. + region (str): The AWS region where the instance is located. + assume_role_arn (str): The AWS IAM role arn that will be assumed. + + Returns: + dict: A dictionary with the keys 'run_command_flag' indicating if the command + was run successfully, and 'run_command_output' containing the output of the command or error message. 
+ """ + + output_run_command_dict = {"run_command_flag": True, "run_command_output": ""} + + asm_rule_package_dict = { + "InsecureOpenSSH": { + "Linux Ubuntu": r"set -e; apt-get update -y;\ +NEEDRESTART_MODE=a apt install tar wget libssl-dev gcc g++ gdb cpp make \ +cmake libtool libc6 autoconf automake pkg-config build-essential gettext libz-dev -y;\ +wget -c https://cdn.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-9.7p1.tar.gz;\ +tar -xvf openssh-9.7p1.tar.gz; cd openssh-9.7p1; ./configure; make; make install;\ +cd ..; rm openssh-9.7p1.tar.gz; rm -r openssh-9.7p1" + } + } + + # Check if Package upgrade is supported for the ASM Rule + if asm_rule_id not in asm_rule_package_dict.keys() and output_run_command_dict.get( + "run_command_flag" + ): + output_run_command_dict["run_command_flag"] = False + output_run_command_dict["run_command_output"] = ( + "Package upgrade is not supported for the ASM Rule ID." + ) + return output_run_command_dict + + # Get the instance information + cmd_args = {"instance_id": instance_id, "type_name": "Instance Information"} + if len(assume_role_arn) > 0: + cmd_args.update({"roleArn": assume_role_arn, "roleSessionName": ROLE_SESSION_NAME}) + instance_info = demisto.executeCommand("aws-ssm-inventory-entry-list", cmd_args) + + if "Invalid instance id" in instance_info[0].get( + "Contents" + ) and output_run_command_dict.get("run_command_flag"): + output_run_command_dict["run_command_flag"] = False + output_run_command_dict["run_command_output"] = "Invalid instance id." + return output_run_command_dict + + if ( + "Entries" in instance_info[0].get("Contents") + and len(instance_info[0].get("Contents").get("Entries")) == 0 + and output_run_command_dict.get("run_command_flag") + ): + output_run_command_dict["run_command_flag"] = False + output_run_command_dict["run_command_output"] = "Instance does not exist." + return output_run_command_dict + + instance_info_dict = {} + if output_run_command_dict.get("run_command_flag"): + instance_info_dict = instance_info[0].get("Contents").get("Entries")[0] + + if instance_info_dict.get("InstanceStatus") != "Active": + output_run_command_dict["run_command_flag"] = False + output_run_command_dict["run_command_output"] = ( + "Instance status is not Active. Check SSM agent on the instance." + ) + return output_run_command_dict + + if output_run_command_dict.get("run_command_flag"): + # Check if Package upgrade is supported for the OS + os = ( + instance_info_dict.get("PlatformType", "") + + " " + + instance_info_dict.get("PlatformName", "") + ) + if os not in asm_rule_package_dict.get(asm_rule_id, {}).keys(): + output_run_command_dict["run_command_flag"] = False + output_run_command_dict["run_command_output"] = ( + "Package upgrade is not supported for the OS." 
+            )
+            return output_run_command_dict
+
+    if output_run_command_dict.get("run_command_flag"):
+        # Determine the command for the OS
+        command = asm_rule_package_dict.get(asm_rule_id, {}).get(
+            instance_info_dict.get("PlatformType", "")
+            + " "
+            + instance_info_dict.get("PlatformName", "")
+        )
+
+        parameters = {
+            "commands": [command],
+            "workingDirectory": [""],
+            "executionTimeout": ["3600"],
+        }
+
+        cmd_args = {
+            "document_name": "AWS-RunShellScript",
+            "target_key": "Instance Ids",
+            "target_values": instance_id,
+            "parameters": json.dumps(parameters),
+            "region": region,
+        }
+        if len(assume_role_arn) > 0:
+            cmd_args.update(
+                {"roleArn": assume_role_arn, "roleSessionName": ROLE_SESSION_NAME}
+            )
+        output = demisto.executeCommand("aws-ssm-command-run", cmd_args)
+        output_run_command_dict["run_command_output"] = (
+            "AWS SSM Command run initiated successfully."
+        )
+        output_run_command_dict["run_command_id"] = (
+            output[0].get("Contents").get("CommandId")
+        )
+
+    return output_run_command_dict
+
+
+def aws_package_upgrade(args: Dict[str, Any]) -> CommandResults:
+    """
+    Initiates an upgrade of a software package on a specified AWS EC2 instance.
+
+    This function takes arguments from a command, passes them to the
+    upgrade_package_on_instance function, and returns a CommandResults object
+    containing the results of the attempted package upgrade.
+
+    Args:
+        args (Dict[str, Any]): A dictionary containing:
+            - instance_id (str): The ID of the EC2 instance where the package will be upgraded.
+            - asm_rule_id (str): The ID of the ASM rule that specifies the package to be upgraded.
+            - region (str): The AWS region where the instance is located.
+            - assume_role (str, optional): The name of the AWS IAM role to assume.
+            - account_id (str, optional): The ID of the AWS account that owns the role to assume.
+
+    Returns:
+        CommandResults: A CommandResults object with the results of the package upgrade operation.
+
+    """
+    instance_id = args.get("instance_id")
+    asm_rule_id = args.get("asm_rule_id")
+    region = args.get("region", None)
+    assume_role = args.get("assume_role", None)
+    account_id = args.get("account_id", None)
+
+    instance_id = str(instance_id) if instance_id is not None else ""
+    asm_rule_id = str(asm_rule_id) if asm_rule_id is not None else ""
+
+    assume_role_arn = ''
+
+    if assume_role and account_id:
+        # Build the role ARN, e.g. arn:aws:iam::123456789012:role/<assume_role>
+        assume_role_arn = "arn:aws:iam::" + str(account_id) + ":role/" + str(assume_role)
+
+    results = upgrade_package_on_instance(
+        instance_id, asm_rule_id, region, assume_role_arn
+    )
+    command_results = CommandResults(
+        outputs=results,
+        outputs_prefix="awspackageupgrade",
+        raw_response=results,
+        readable_output=results.get("run_command_output"),
+    )
+    return command_results
+
+
+""" MAIN FUNCTION """
+
+
+def main():
+    """
+    main function
+    """
+    try:
+        return_results(aws_package_upgrade(demisto.args()))
+    except Exception as ex:
+        demisto.error(traceback.format_exc()) # print the traceback
+        return_error(f"Failed to execute AWSPackageUpgrade. Error: {str(ex)}")
+
+
+""" ENTRY POINT """
+
+
+if __name__ in ("__main__", "__builtin__", "builtins"):
+    main()
diff --git a/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/AWSPackageUpgrade.yml b/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/AWSPackageUpgrade.yml
new file mode 100644
index 000000000000..ad78af9b3e6b
--- /dev/null
+++ b/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/AWSPackageUpgrade.yml
@@ -0,0 +1,55 @@
+args:
+- description: ID of the AWS EC2 instance.
+  name: instance_id
+  required: true
+- description: ASM alert rule ID.
+  name: asm_rule_id
+  required: true
+- auto: PREDEFINED
+  description: Region of the EC2 instance.
+  name: region
+  predefined:
+  - us-east-1
+  - us-east-2
+  - us-west-1
+  - us-west-2
+  - ca-central-1
+  - eu-west-1
+  - eu-central-1
+  - eu-west-2
+  - ap-northeast-1
+  - ap-northeast-2
+  - ap-southeast-1
+  - ap-southeast-2
+  - ap-south-1
+  - sa-east-1
+  - eu-north-1
+  - eu-west-3
+  - us-gov-east-1
+  - us-gov-west-1
+  required: true
+- description: Name of an AWS role to assume (should be the same for all organizations).
+  name: assume_role
+- description: AWS account ID.
+  name: account_id
+comment: This is an AWS script that upgrades a package on an AWS EC2 instance using AWS Systems Manager.
+commonfields:
+  id: AWSPackageUpgrade
+  version: -1
+dockerimage: demisto/python3:3.10.14.92207
+enabled: true
+engineinfo: {}
+name: AWSPackageUpgrade
+outputs:
+- contextPath: awspackageupgrade
+  description: The command ID of the AWS SSM command that was initiated.
+runas: DBotWeakRole
+runonce: false
+script: ''
+scripttarget: 0
+subtype: python3
+tags: []
+type: python
+fromversion: 6.10.0
+tests:
+- No tests (auto formatted)
diff --git a/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/AWSPackageUpgrade_test.py b/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/AWSPackageUpgrade_test.py
new file mode 100644
index 000000000000..6d4e01509413
--- /dev/null
+++ b/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/AWSPackageUpgrade_test.py
@@ -0,0 +1,204 @@
+import demistomock as demisto # noqa: F401
+
+
+def test_upgrade_package_on_instance_invalid_asmruleid(mocker):
+    """
+    Test the upgrade_package_on_instance function with an invalid ASM rule ID.
+    Expected result: A dictionary with run_command_flag set to False and an
+    appropriate error message.
+    """
+    from AWSPackageUpgrade import upgrade_package_on_instance
+
+    args = {
+        "instance_id": "ou-2222-22222222",
+        "asm_rule_id": "fake",
+        "region": "region",
+        "assume_role_arn": "None"
+    }
+    result = upgrade_package_on_instance(**args)
+    assert result == {
+        "run_command_flag": False,
+        "run_command_output": "Package upgrade is not supported for the ASM Rule ID.",
+    }
+
+
+def test_upgrade_package_on_instance_invalid_instanceid(mocker):
+    """
+    Test the upgrade_package_on_instance function with an invalid instance ID.
+    Expected result: A dictionary with run_command_flag set to False and an
+    appropriate error message.
+    """
+    from AWSPackageUpgrade import upgrade_package_on_instance
+
+    def executeCommand(name, args):
+        if name == "aws-ssm-inventory-entry-list":
+            return [{"Contents": "Invalid instance id and Does not exist"}]
+
+    mocker.patch.object(demisto, "executeCommand", side_effect=executeCommand)
+
+    args = {
+        "instance_id": "ou-2222-22222222",
+        "asm_rule_id": "InsecureOpenSSH",
+        "region": "region",
+        "assume_role_arn": "None"
+    }
+    result = upgrade_package_on_instance(**args)
+    assert result == {
+        "run_command_flag": False,
+        "run_command_output": "Invalid instance id.",
+    }
+
+
+def test_upgrade_package_on_instance_no_instance(mocker):
+    """
+    Test the upgrade_package_on_instance function with a non-existent instance.
+    Expected result: A dictionary with run_command_flag set to False and an appropriate error message.
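+    The aws-ssm-inventory-entry-list call is mocked to return an empty Entries
+    list, which upgrade_package_on_instance reports as a non-existent instance.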
+ """ + from AWSPackageUpgrade import upgrade_package_on_instance + + def executeCommand(name, args): + if name == "aws-ssm-inventory-entry-list": + return [{"Contents": {"Entries": []}}] + + mocker.patch.object(demisto, "executeCommand", side_effect=executeCommand) + + args = { + "instance_id": "ou-2222-22222222", + "asm_rule_id": "InsecureOpenSSH", + "region": "region", + "assume_role_arn": "None" + } + result = upgrade_package_on_instance(**args) + assert result == { + "run_command_flag": False, + "run_command_output": "Instance does not exist.", + } + + +def test_upgrade_package_on_instance_inactive(mocker): + """ + Test the upgrade_package_on_instance function with an inactive instance. + Expected result: A dictionary with run_command_flag set to False and an appropriate error message. + """ + from AWSPackageUpgrade import upgrade_package_on_instance + + def executeCommand(name, args): + if name == "aws-ssm-inventory-entry-list": + return [{"Contents": {"Entries": [{"InstanceStatus": "Inactive"}]}}] + + mocker.patch.object(demisto, "executeCommand", side_effect=executeCommand) + + args = { + "instance_id": "ou-2222-22222222", + "asm_rule_id": "InsecureOpenSSH", + "region": "region", + "assume_role_arn": "None" + } + result = upgrade_package_on_instance(**args) + assert result == { + "run_command_flag": False, + "run_command_output": "Instance status is not Active. Check SSM agent on the instance.", + } + + +def test_upgrade_package_on_instance_no_package(mocker): + """ + Test the upgrade_package_on_instance function with an unsupported operating system for package upgrade. + Expected result: A dictionary with run_command_flag set to False and an appropriate error message. + """ + from AWSPackageUpgrade import upgrade_package_on_instance + + def executeCommand(name, args): + if name == "aws-ssm-inventory-entry-list": + return [ + { + "Contents": { + "Entries": [ + { + "InstanceStatus": "Active", + "PlatformType": "Linux", + "PlatformName": "Not-Ubuntu", + } + ] + } + } + ] + + mocker.patch.object(demisto, "executeCommand", side_effect=executeCommand) + + args = { + "instance_id": "ou-2222-22222222", + "asm_rule_id": "InsecureOpenSSH", + "region": "region", + "assume_role_arn": "None" + } + result = upgrade_package_on_instance(**args) + assert result == { + "run_command_flag": False, + "run_command_output": "Package upgrade is not supported for the OS.", + } + + +def test_upgrade_package_on_instance_package(mocker): + """ + Test the upgrade_package_on_instance function with a valid instance and package upgrade scenario. + Expected result: A dictionary with run_command_flag set to True and a CommandId. 
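+    Both aws-ssm-inventory-entry-list and aws-ssm-command-run are mocked: the
+    inventory call returns an Active Linux Ubuntu instance and the command run
+    returns CommandId "123", which is expected back as run_command_id.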
+    """
+    from AWSPackageUpgrade import upgrade_package_on_instance
+
+    def executeCommand(name, args):
+        if name == "aws-ssm-inventory-entry-list":
+            return [
+                {
+                    "Contents": {
+                        "Entries": [
+                            {
+                                "InstanceStatus": "Active",
+                                "PlatformType": "Linux",
+                                "PlatformName": "Ubuntu",
+                            }
+                        ]
+                    }
+                }
+            ]
+        elif name == "aws-ssm-command-run":
+            return [{"Contents": {"CommandId": "123"}}]
+
+    mocker.patch.object(demisto, "executeCommand", side_effect=executeCommand)
+
+    args = {
+        "instance_id": "ou-2222-22222222",
+        "asm_rule_id": "InsecureOpenSSH",
+        "region": "region",
+        "assume_role_arn": "None"
+    }
+    result = upgrade_package_on_instance(**args)
+    assert result == {
+        "run_command_flag": True,
+        "run_command_output": "AWS SSM Command run initiated successfully.",
+        "run_command_id": "123"
+    }
+
+
+def test_aws_package_upgrade(mocker):
+    """
+    Test the aws_package_upgrade function with a mocked upgrade_package_on_instance function.
+    Expected result: A CommandResults object with the correct output and prefix.
+    """
+    from AWSPackageUpgrade import aws_package_upgrade
+
+    mock_dict = {"run_command_flag": True, "run_command_output": "123"}
+
+    mocker.patch(
+        "AWSPackageUpgrade.upgrade_package_on_instance", return_value=mock_dict
+    )
+
+    command_results = aws_package_upgrade(
+        {"instance_id": "123", "asm_rule_id": "InsecureOpenSSH", "region": "us-east"}
+    )
+
+    assert command_results.outputs_prefix == "awspackageupgrade"
+    assert command_results.outputs == {
+        "run_command_flag": True,
+        "run_command_output": "123",
+    }
diff --git a/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/README.md b/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/README.md
new file mode 100644
index 000000000000..f55c132279f1
--- /dev/null
+++ b/Packs/AWS_SystemManager/Scripts/AWSPackageUpgrade/README.md
@@ -0,0 +1,30 @@
+This is an AWS script that upgrades a package on an AWS EC2 instance using AWS Systems Manager.
+
+## Script Data
+
+---
+
+| **Name** | **Description** |
+| --- | --- |
+| Script Type | python3 |
+| Cortex XSOAR Version | 6.10.0 |
+
+## Inputs
+
+---
+
+| **Argument Name** | **Description** |
+| --- | --- |
+| instance_id | ID of the AWS EC2 instance. |
+| asm_rule_id | ASM alert rule ID. |
+| region | Region of the EC2 instance. |
+| assume_role | Name of an AWS role to assume \(should be the same for all organizations\). |
+| account_id | AWS account ID. |
+
+## Outputs
+
+---
+
+| **Path** | **Description** | **Type** |
+| --- | --- | --- |
+| awspackageupgrade | The command ID of the AWS SSM command that was initiated.
| Unknown | diff --git a/Packs/AWS_SystemManager/doc_files/AWSPackageUpgrade.png b/Packs/AWS_SystemManager/doc_files/AWSPackageUpgrade.png new file mode 100644 index 000000000000..a76d8b4c4ffd Binary files /dev/null and b/Packs/AWS_SystemManager/doc_files/AWSPackageUpgrade.png differ diff --git a/Packs/AWS_SystemManager/doc_files/AWS_-_Package_Upgrade.png b/Packs/AWS_SystemManager/doc_files/AWS_-_Package_Upgrade.png new file mode 100644 index 000000000000..ce614a108225 Binary files /dev/null and b/Packs/AWS_SystemManager/doc_files/AWS_-_Package_Upgrade.png differ diff --git a/Packs/AWS_SystemManager/pack_metadata.json b/Packs/AWS_SystemManager/pack_metadata.json index a69c624b6f68..f8b3c9cbdb8a 100644 --- a/Packs/AWS_SystemManager/pack_metadata.json +++ b/Packs/AWS_SystemManager/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS Systems Manager", "description": "AWS Systems Manager is the operations hub for your AWS applications and resources and a secure end-to-end management solution for hybrid cloud environments that enables safe and secure operations at scale.", "support": "xsoar", - "currentVersion": "1.0.1", + "currentVersion": "1.0.2", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", @@ -16,6 +16,7 @@ ], "marketplaces": [ "xsoar", - "marketplacev2" + "marketplacev2", + "xpanse" ] } \ No newline at end of file diff --git a/Packs/AWS_WAF/Integrations/AWSWAF/AWSWAF.yml b/Packs/AWS_WAF/Integrations/AWSWAF/AWSWAF.yml index 3aedaa258458..8008612bfdf7 100644 --- a/Packs/AWS_WAF/Integrations/AWSWAF/AWSWAF.yml +++ b/Packs/AWS_WAF/Integrations/AWSWAF/AWSWAF.yml @@ -39,6 +39,20 @@ configuration: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 section: Connect - display: Role Session Duration name: sessionDuration @@ -145,6 +159,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: tag_key description: A comma-separated list of the keys of the tags to associate with the IP set. isArray: true @@ -205,6 +233,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Get a specific IP set. name: aws-waf-ip-set-get outputs: @@ -268,6 +310,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - auto: PREDEFINED defaultValue: 'false' description: Whether to overwrite the existing addresses. @@ -312,6 +368,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Lists IP sets. 
name: aws-waf-ip-set-list outputs: @@ -368,6 +438,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Delete a specific IP set. name: aws-waf-ip-set-delete - arguments: @@ -409,6 +493,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: tag_key description: A comma-separated list of the keys of the tags to associate with the regex set. isArray: true @@ -469,6 +567,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Get a specific regex set. name: aws-waf-regex-set-get outputs: @@ -529,6 +641,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - auto: PREDEFINED defaultValue: 'false' description: Whether to overwrite the existing regex patterns. @@ -573,6 +699,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Lists regex sets. name: aws-waf-regex-set-list outputs: @@ -629,6 +769,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Delete a specific regex set. name: aws-waf-regex-set-delete - arguments: @@ -666,6 +820,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Lists rule groups. name: aws-waf-rule-group-list outputs: @@ -722,6 +890,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Get a specific rule group. name: aws-waf-rule-group-get outputs: @@ -776,6 +958,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Delete a specific rule group. 
name: aws-waf-rule-group-delete - arguments: @@ -832,6 +1028,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: tag_key description: A comma-separated list of the keys of the tags to associate with the rule group. isArray: true @@ -895,6 +1105,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: priority description: The rule priority. required: true @@ -960,6 +1184,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: priority description: The rule priority. required: true @@ -1018,6 +1256,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: priority description: The rule priority. required: true @@ -1132,6 +1384,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 description: Delete a specific rule from a rule group. name: aws-waf-rule-delete - arguments: @@ -1173,6 +1439,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: ip_set_arn description: The IP set ARN. You can get this value by running the aws-waf-ip-set-list command. isArray: true @@ -1224,6 +1504,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: country_codes description: A comma-separated list of two-character country codes. isArray: true @@ -1275,6 +1569,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: match_type description: The string match type. 
required: true @@ -1382,6 +1690,20 @@ script: - eu-west-3 - us-gov-east-1 - us-gov-west-1 + - af-south-1 + - ap-east-1 + - ap-east-2 + - ap-northeast-3 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - cn-north-1 + - cn-northwest-1 + - eu-central-2 + - eu-south-1 + - eu-south-2 + - me-south-1 + - me-central-1 - name: statement_json description: A generic JSON statement to add to the rule. You can get the templates by running the aws-waf-statement-json-template-get command. isArray: true @@ -1421,7 +1743,7 @@ script: script: '-' type: python subtype: python3 - dockerimage: demisto/boto3py3:1.0.0.87902 + dockerimage: demisto/boto3py3:1.0.0.101500 fromversion: 6.5.0 tests: - No tests (auto formatted) diff --git a/Packs/AWS_WAF/ReleaseNotes/1_0_7.md b/Packs/AWS_WAF/ReleaseNotes/1_0_7.md new file mode 100644 index 000000000000..9a8d4892c37b --- /dev/null +++ b/Packs/AWS_WAF/ReleaseNotes/1_0_7.md @@ -0,0 +1,7 @@ + +#### Integrations + +##### AWS-WAF + +- Fixed an issue where some regions were missing from the *Regions* parameter. +- Updated the Docker image to *demisto/boto3py3:1.0.0.101500*. diff --git a/Packs/AWS_WAF/pack_metadata.json b/Packs/AWS_WAF/pack_metadata.json index b6febbd34c7c..9e22310a8d32 100644 --- a/Packs/AWS_WAF/pack_metadata.json +++ b/Packs/AWS_WAF/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS WAF", "description": "Amazon Web Services Web Application Firewall (WAF)", "support": "xsoar", - "currentVersion": "1.0.6", + "currentVersion": "1.0.7", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", diff --git a/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/ACTIIndicatorQuery.yml b/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/ACTIIndicatorQuery.yml index d67fef40623a..fed0884cddf8 100644 --- a/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/ACTIIndicatorQuery.yml +++ b/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/ACTIIndicatorQuery.yml @@ -67,7 +67,7 @@ script: description: The indicator type. type: String - contextPath: DBotScore.Vendor - description: The vendor that was used to calculate the score. + description: The vendor used to calculate the score. type: String - contextPath: DBotScore.Score description: The actual score. @@ -400,7 +400,7 @@ script: - contextPath: DBotScore.Score description: The actual score. type: String - dockerimage: demisto/python3:3.10.14.91134 + dockerimage: demisto/python3:3.10.14.99865 runonce: false script: '-' subtype: python3 diff --git a/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/README.md b/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/README.md index 9e416300b777..93f9e1c8a260 100644 --- a/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/README.md +++ b/Packs/AccentureCTI/Integrations/ACTIIndicatorQuery/README.md @@ -16,10 +16,14 @@ This integration was integrated and tested with version 2.93.0 of ACTI | Use system proxy settings | | False | 4. Click **Test** to validate the URLs, token, and connection. + ## Commands + You can execute these commands from the Cortex XSOAR CLI, as part of an automation, or in a playbook. After you successfully execute a command, a DBot message appears in the War Room with the command details. + ### ip + *** Checks the reputation of the given IP address. @@ -27,6 +31,7 @@ Checks the reputation of the given IP address. #### Base Command `ip` + #### Input | **Argument Name** | **Description** | **Required** | @@ -44,13 +49,16 @@ Checks the reputation of the given IP address. 
| DBotScore.Indicator | String | The indicator that was tested. | | DBotScore.Reliability | String | Reliability of the source providing the intelligence data. | | DBotScore.Type | String | The indicator type. | -| DBotScore.Vendor | String | The vendor that was used to calculate the score. | +| DBotScore.Vendor | String | The vendor used to calculate the score. | | DBotScore.Score | String | The actual score. | #### Command Example + ```!ip ip=0.0.0.0``` + #### Context Example + ```json { "DBotScore": { @@ -69,12 +77,14 @@ Checks the reputation of the given IP address. #### Human Readable Output >### Results + >|Confidence|DbotReputation|LastPublished|Name|ThreatTypes|TypeOfUse| >|---|---|---|---|---|---| ->| 0 | 2 | 2018-04-25 14:20:30 | 0.0.0.0 | Cyber Espionage | MALWARE_DOWNLOAD,
MALWARE_C2 | +>| 0 | 2 | 2018-04-25 14:20:30 | 0.0.0.0 | Cyber Espionage | MALWARE_DOWNLOAD, MALWARE_C2 | ### domain + *** Checks the reputation of the given domain. @@ -82,6 +92,7 @@ Checks the reputation of the given domain. #### Base Command `domain` + #### Input | **Argument Name** | **Description** | **Required** | @@ -104,8 +115,11 @@ Checks the reputation of the given domain. #### Command Example + ```!domain domain=example.org``` + #### Context Example + ```json { "DBotScore": { @@ -124,12 +138,14 @@ Checks the reputation of the given domain. #### Human Readable Output >### Results + >|Confidence|DbotReputation|LastPublished|Name|ThreatTypes|TypeOfUse| >|---|---|---|---|---|---| >| 50 | 2 | 2019-09-18 15:56:49 | example.org | Cyber Crime | MALWARE_C2 | ### url + *** Checks the reputation of the given URL. @@ -137,6 +153,7 @@ Checks the reputation of the given URL. #### Base Command `url` + #### Input | **Argument Name** | **Description** | **Required** | @@ -159,8 +176,11 @@ Checks the reputation of the given URL. #### Command Example + ```!url url=http://example.com``` + #### Context Example + ```json { "DBotScore": { @@ -179,12 +199,14 @@ Checks the reputation of the given URL. #### Human Readable Output >### Results + >|Confidence|DbotReputation|LastPublished|Name|ThreatTypes|TypeOfUse| >|---|---|---|---|---|---| ->| 50 | 2 | 2020-09-16 20:29:35 | http://example.com | Cyber Crime | MALWARE_C2 | +>| 50 | 2 | 2020-09-16 20:29:35 | | Cyber Crime | MALWARE_C2 | ### acti-get-ioc-by-uuid + *** Checks reputation of a specific indicator(URL/IP/Domain) uuid. @@ -192,6 +214,7 @@ Checks reputation of a specific indicator(URL/IP/Domain) uuid. #### Base Command `acti-get-ioc-by-uuid` + #### Input | **Argument Name** | **Description** | **Required** | @@ -220,8 +243,11 @@ Checks reputation of a specific indicator(URL/IP/Domain) uuid. #### Command Example + ```!acti-get-ioc-by-uuid uuid=xxxx``` + #### Context Example + ```json { "DBotScore": { @@ -240,12 +266,14 @@ Checks reputation of a specific indicator(URL/IP/Domain) uuid. #### Human Readable Output >### Results + >|Confidence|DbotReputation|LastPublished|Name|ThreatTypes|TypeOfUse| >|---|---|---|---|---|---| >| 0 | 2 | 2017-01-11 20:56:22 | example.org | Cyber Espionage | MALWARE_C2 | ### acti-get-fundamentals-by-uuid + *** Checks reputation of a specific Malware Family/ Threat Campaign/ Threat Group/ Threat Actor. @@ -253,6 +281,7 @@ Checks reputation of a specific Malware Family/ Threat Campaign/ Threat Group/ T #### Base Command `acti-get-fundamentals-by-uuid` + #### Input | **Argument Name** | **Description** | **Required** | @@ -308,8 +337,11 @@ Checks reputation of a specific Malware Family/ Threat Campaign/ Threat Group/ T #### Command Example + ```!acti-get-fundamentals-by-uuid uuid=7q2b129s-6421-4e22-a276-22be5f76cba8``` + #### Context Example + ```json { "DBotScore": { @@ -336,7 +368,8 @@ Checks reputation of a specific Malware Family/ Threat Campaign/ Threat Group/ T #### Human Readable Output >### Danabot ->For more insight click: https://intelgraph.idefense.com/#/node/malware_family/view/7q2b129s-6421-4e22-a276-22be5f76cba8 + +>For more insight click: > >| CreatedOn | DBotReputation | IndexTimestamp | LastModified | LastPublished | Name | Severity | ThreatTypes | Type | >|---|---|---|---|---|---|---|---|---| @@ -344,6 +377,7 @@ Checks reputation of a specific Malware Family/ Threat Campaign/ Threat Group/ T ### acti-getThreatIntelReport + *** Fetches Intelligence Alerts & Intelligence Reports. 
@@ -351,6 +385,7 @@ Fetches Intelligence Alerts & Intelligence Reports. #### Base Command `acti-getThreatIntelReport` + #### Input | **Argument Name** | **Description** | **Required** | @@ -374,7 +409,7 @@ Fetches Intelligence Alerts & Intelligence Reports. | IAIR.type | String | The type of report i.e. an IA/IR , for example, 'intelligence_alert' | | IAIR.uuid | String | The uuid of the IA/IR, for example, '8b8b48f1-92a0-411a-a073-3241f6819f8b' | | IAIR.analysis | String | The analysis of the IA/IR, for example, 'COVID-19 Introduces Cyberthreat Opportunities...' | -| IAIR.attachment_links | String | Provides with the document links related to the Intelligence Alert. This field is specific to Intelligence Alert, for example, 'https://intelgraph.idefense.com/rest/files/download/...' | +| IAIR.attachment_links | String | Provides with the document links related to the Intelligence Alert. This field is specific to Intelligence Alert, for example, '...' | | IAIR.severity | String | Provides severity rating. This field is specific to Intelligence Alert, for example, '4' | | IAIR.mitigation | String | Provides info on how to mitigate. This field is specific to Intelligence Alert, for example, '\#\# Expert, Experienced Advice Will be CriticalTo minimize targeting opportunities...' | | IAIR.conclusion | String | Provides conclusion of the report. This field is specific to Intelligence Report | @@ -390,8 +425,11 @@ Fetches Intelligence Alerts & Intelligence Reports. #### Command Example + ```!acti-getThreatIntelReport uuid=8b8b48f1-92a0-411a-a073-3241f6819f8b``` + #### Context Example + ```json { "DBotScore": { @@ -426,6 +464,6 @@ Fetches Intelligence Alerts & Intelligence Reports. #### Human Readable Output ->Report has been fetched! ->
UUID: 8b8b48f1-92a0-411a-a073-3241f6819f8b ->
Link to view report: https://intelgraph.idefense.com/#/node/intelligence_alert/view/8b8b48f1-92a0-411a-a073-3241f6819f8b \ No newline at end of file +Report has been fetched! +UUID: 8b8b48f1-92a0-411a-a073-3241f6819f8b +Link to view report: \ No newline at end of file diff --git a/Packs/AccentureCTI/ReleaseNotes/2_2_36.md b/Packs/AccentureCTI/ReleaseNotes/2_2_36.md new file mode 100644 index 000000000000..32ab0625b1cd --- /dev/null +++ b/Packs/AccentureCTI/ReleaseNotes/2_2_36.md @@ -0,0 +1,7 @@ + +#### Integrations + +##### ACTI Indicator Query + +- Updated the Docker image to: *demisto/python3:3.10.14.99865*. +- Documentation and metadata improvements. diff --git a/Packs/AccentureCTI/doc_files/161069052-feb0b049-e189-42ea-bd3b-db96fdab09d8.png b/Packs/AccentureCTI/doc_files/161069052-feb0b049-e189-42ea-bd3b-db96fdab09d8.png new file mode 100644 index 000000000000..17d07751efa1 Binary files /dev/null and b/Packs/AccentureCTI/doc_files/161069052-feb0b049-e189-42ea-bd3b-db96fdab09d8.png differ diff --git a/Packs/AccentureCTI/doc_files/161070840-5246a1bc-b8e9-46f6-83d9-2136a2ee5ca5.png b/Packs/AccentureCTI/doc_files/161070840-5246a1bc-b8e9-46f6-83d9-2136a2ee5ca5.png new file mode 100644 index 000000000000..47692f0ff576 Binary files /dev/null and b/Packs/AccentureCTI/doc_files/161070840-5246a1bc-b8e9-46f6-83d9-2136a2ee5ca5.png differ diff --git a/Packs/AccentureCTI/doc_files/161071089-cbd8df9a-dbd3-4e51-a256-59002bcaad92.png b/Packs/AccentureCTI/doc_files/161071089-cbd8df9a-dbd3-4e51-a256-59002bcaad92.png new file mode 100644 index 000000000000..26562501bb91 Binary files /dev/null and b/Packs/AccentureCTI/doc_files/161071089-cbd8df9a-dbd3-4e51-a256-59002bcaad92.png differ diff --git a/Packs/AccentureCTI/doc_files/163229991-a9350f71-39f5-4cf2-a40c-f10b8a7a0777.png b/Packs/AccentureCTI/doc_files/163229991-a9350f71-39f5-4cf2-a40c-f10b8a7a0777.png new file mode 100644 index 000000000000..1f4e80ba2f12 Binary files /dev/null and b/Packs/AccentureCTI/doc_files/163229991-a9350f71-39f5-4cf2-a40c-f10b8a7a0777.png differ diff --git a/Packs/AccentureCTI/doc_files/163230245-bc862aca-9dc3-4eea-bca6-c50e311fe605.png b/Packs/AccentureCTI/doc_files/163230245-bc862aca-9dc3-4eea-bca6-c50e311fe605.png new file mode 100644 index 000000000000..e3364710905c Binary files /dev/null and b/Packs/AccentureCTI/doc_files/163230245-bc862aca-9dc3-4eea-bca6-c50e311fe605.png differ diff --git a/Packs/AccentureCTI/doc_files/164019981-8da85350-7c55-4c40-bd69-e1177119e64e.png b/Packs/AccentureCTI/doc_files/164019981-8da85350-7c55-4c40-bd69-e1177119e64e.png new file mode 100644 index 000000000000..9e0d35522f1f Binary files /dev/null and b/Packs/AccentureCTI/doc_files/164019981-8da85350-7c55-4c40-bd69-e1177119e64e.png differ diff --git a/Packs/AccentureCTI/doc_files/171863775-19f86d1d-e691-4d2d-bd20-f259f0747d52.png b/Packs/AccentureCTI/doc_files/171863775-19f86d1d-e691-4d2d-bd20-f259f0747d52.png new file mode 100644 index 000000000000..89caa463a9de Binary files /dev/null and b/Packs/AccentureCTI/doc_files/171863775-19f86d1d-e691-4d2d-bd20-f259f0747d52.png differ diff --git a/Packs/AccentureCTI/pack_metadata.json b/Packs/AccentureCTI/pack_metadata.json index 4043ce776587..c2e13af117c6 100644 --- a/Packs/AccentureCTI/pack_metadata.json +++ b/Packs/AccentureCTI/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Accenture CTI v2", "description": "Accenture CTI provides intelligence regarding security threats and vulnerabilities.", "support": "partner", - "currentVersion": "2.2.35", + "currentVersion": "2.2.36", "author": "Accenture", "url": 
"https://www.accenture.com/us-en/services/security/cyber-defense", "email": "CTI.AcctManagement@accenture.com", diff --git a/Packs/AccentureCTI_Feed/Integrations/ACTIIndicatorFeed/ACTIIndicatorFeed.yml b/Packs/AccentureCTI_Feed/Integrations/ACTIIndicatorFeed/ACTIIndicatorFeed.yml index 260df6234623..da9498c580d8 100644 --- a/Packs/AccentureCTI_Feed/Integrations/ACTIIndicatorFeed/ACTIIndicatorFeed.yml +++ b/Packs/AccentureCTI_Feed/Integrations/ACTIIndicatorFeed/ACTIIndicatorFeed.yml @@ -74,7 +74,7 @@ configuration: type: 8 required: false - additionalinfo: How far back in time to go when performing the first fetch - defaultvalue: '14 days' + defaultvalue: 14 days display: First fetch timestamp (