diff --git a/.openpublishing.publish.config.json b/.openpublishing.publish.config.json index 67ab47fc058a..9a632bfb84a7 100644 --- a/.openpublishing.publish.config.json +++ b/.openpublishing.publish.config.json @@ -32,6 +32,18 @@ "need_preview_pull_request": true, "contribution_branch_mappings": {}, "dependent_repositories": [ + { + "path_to_root": "azure-dev-docs-pr", + "url": "https://github.com/MicrosoftDocs/azure-dev-docs-pr", + "branch": "main", + "branch_mapping": {} + }, + { + "path_to_root": "terraform_samples", + "url": "https://github.com/Azure/terraform", + "branch": "master", + "branch_mapping": {} + }, { "path_to_root": "quickstart-templates", "url": "https://github.com/Azure/azure-quickstart-templates", @@ -79,7 +91,7 @@ "url": "https://github.com/Azure-Samples/msdocs-storage-bind-function-service", "branch": "main", "branch_mapping": {} - }, + }, { "path_to_root": "azure_cli_scripts", "url": "https://github.com/Azure-Samples/azure-cli-samples", @@ -889,8 +901,13 @@ "url": "https://github.com/Azure-Samples/azure-sql-binding-func-dotnet-todo", "branch": "docs-snippets", "branch_mapping": {} + }, + { + "path_to_root": "ms-identity-node", + "url": "https://github.com/Azure-Samples/ms-identity-node", + "branch": "main", + "branch_mapping": {} + } - ], "branch_target_mapping": { "live": ["Publish", "PDF"], diff --git a/.openpublishing.redirection.azure-monitor.json b/.openpublishing.redirection.azure-monitor.json index 83a19daf4a0a..2d48525503f6 100644 --- a/.openpublishing.redirection.azure-monitor.json +++ b/.openpublishing.redirection.azure-monitor.json @@ -162,7 +162,7 @@ }, { "source_path_from_root": "/articles/azure-monitor/alerts/alerts-metric-overview.md" , - "redirect_url": "/azure/azure-monitor/alerts/alert-types.md#metric-alerts", + "redirect_url": "/azure/azure-monitor/alerts/alert-types", "redirect_document_id": false }, { @@ -172,12 +172,17 @@ }, { "source_path_from_root": "/articles/azure-monitor/alerts/alerts-unified-log.md" , - 
"redirect_url": "/azure/azure-monitor/alerts/alert-types.md#log-alerts", + "redirect_url": "/azure/azure-monitor/alerts/alert-types.md", "redirect_document_id": false }, { "source_path_from_root": "/articles/azure-monitor/alerts/activity-log-alerts.md" , - "redirect_url": "/azure/azure-monitor/alerts/alert-types.md#activity-log-alerts", + "redirect_url": "/azure/azure-monitor/alerts/alert-types.md", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/insights/data-explorer.md" , + "redirect_url": "/azure/data-explorer/data-explorer-insights", "redirect_document_id": false } ] diff --git a/.openpublishing.redirection.json b/.openpublishing.redirection.json index 27bcbcde2b8e..0248001462b1 100644 --- a/.openpublishing.redirection.json +++ b/.openpublishing.redirection.json @@ -2718,6 +2718,11 @@ "redirect_url": "/azure/aks/load-balancer-standard", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/aks/keda.md", + "redirect_url": "/azure/aks/keda-about", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/analysis-services/analysis-services-create-model-portal.md", "redirect_url": "/azure/analysis-services/analysis-services-overview", @@ -43274,6 +43279,11 @@ "redirect_url": "/azure/cognitive-services/translator/custom-translator/key-terms", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/applied-ai-services/form-recognizer/generate-sas-tokens.md", + "redirect_url": "/azure/applied-ai-services/form-recognizer/create-sas-tokens", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/cognitive-services/language-service/text-summarization/how-to/call-api.md", "redirect_url": "/azure/cognitive-services/language-service/summarization/how-to/document-summarization", diff --git a/articles/active-directory-b2c/partner-gallery.md b/articles/active-directory-b2c/partner-gallery.md index 04f79ea04c78..1332dbfb451c 100644 --- 
a/articles/active-directory-b2c/partner-gallery.md +++ b/articles/active-directory-b2c/partner-gallery.md @@ -21,7 +21,7 @@ Our ISV partner network extends our solution capabilities to help you build seam To be considered into this sample documentation, submit your application request in the [Microsoft Application Network portal](https://microsoft.sharepoint.com/teams/apponboarding/Apps/SitePages/Default.aspx). For any additional questions, send an email to [SaaSApplicationIntegrations@service.microsoft.com](mailto:SaaSApplicationIntegrations@service.microsoft.com). >[!NOTE] ->The [Azure Active Directory B2C community site on GitHub](https://azure-ad-b2c.github.io/azureadb2ccommunity.io/) also provides sample custom policies from the community. +>The [Azure Active Directory B2C community site on GitHub](https://github.com/azure-ad-b2c/partner-integrations) also provides sample custom policies from the community. ## Identity verification and proofing diff --git a/articles/active-directory-b2c/secure-rest-api.md b/articles/active-directory-b2c/secure-rest-api.md index e91fa2c3bf02..11dcb9c561ec 100644 --- a/articles/active-directory-b2c/secure-rest-api.md +++ b/articles/active-directory-b2c/secure-rest-api.md @@ -484,9 +484,22 @@ The following XML snippet is an example of a RESTful technical profile configure ``` -::: zone-end +Add the validation technical profile reference to the sign up technical profile, which calls the `REST-AcquireAccessToken`. This behavior means that Azure AD B2C moves on to create the account in the directory only after successful validation. + +For example: + ```XML + + .... + + .... 
+ + ``` + + +::: zone-end + ## API key authentication ::: zone pivot="b2c-user-flow" diff --git a/articles/active-directory-b2c/whats-new-docs.md b/articles/active-directory-b2c/whats-new-docs.md index f39cea7f8c01..fa5ce2b39a44 100644 --- a/articles/active-directory-b2c/whats-new-docs.md +++ b/articles/active-directory-b2c/whats-new-docs.md @@ -15,6 +15,17 @@ manager: CelesteDG Welcome to what's new in Azure Active Directory B2C documentation. This article lists new docs that have been added and those that have had significant updates in the last three months. To learn what's new with the B2C service, see [What's new in Azure Active Directory](../active-directory/fundamentals/whats-new.md). + +## May 2022 + +### Updated articles + +- [Set redirect URLs to b2clogin.com for Azure Active Directory B2C](b2clogin.md) +- [Enable custom domains for Azure Active Directory B2C](custom-domain.md) +- [Configure xID with Azure Active Directory B2C for passwordless authentication](partner-xid.md) +- [UserJourneys](userjourneys.md) +- [Secure your API used an API connector in Azure AD B2C](secure-rest-api.md) + ## April 2022 ### New articles diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/TOC.yml b/articles/active-directory/cloud-infrastructure-entitlement-management/TOC.yml index 7dd76b378929..d0678307f7aa 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/TOC.yml +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/TOC.yml @@ -1,161 +1,161 @@ - - name: CloudKnox Permissions Management + - name: Permissions Management href: index.yml - name: Overview expanded: true items: - - name: What's CloudKnox Permissions Management? - href: cloudknox-overview.md + - name: What's Permissions Management? 
+ href: overview.md - name: How-to guides expanded: true items: - - name: Onboard CloudKnox on the Azure AD tenant + - name: Onboard Permissions Management on the Azure AD tenant expanded: true items: - - name: Enable CloudKnox in your organization - href: cloudknox-onboard-enable-tenant.md + - name: Enable Permissions Management in your organization + href: onboard-enable-tenant.md - name: Onboard an AWS account - href: cloudknox-onboard-aws.md + href: onboard-aws.md - name: Onboard an Azure subscription - href: cloudknox-onboard-azure.MD + href: onboard-azure.MD - name: Onboard a GCP project - href: cloudknox-onboard-gcp.md + href: onboard-gcp.md - name: Enable or disable the controller after onboarding is complete - href: cloudknox-onboard-enable-controller-after-onboarding.md + href: onboard-enable-controller-after-onboarding.md - name: Add an account/ subscription/ project after onboarding is complete - href: cloudknox-onboard-add-account-after-onboarding.md + href: onboard-add-account-after-onboarding.md - name: View risk metrics in your authorization system expanded: false items: - name: View key statistics and data about your authorization system - href: cloudknox-ui-dashboard.md + href: ui-dashboard.md - name: View data about the activity in your authorization system - href: cloudknox-product-dashboard.md + href: product-dashboard.md - name: Configure settings for data collection expanded: false items: - name: View and configure settings for data collection - href: cloudknox-product-data-sources.md + href: product-data-sources.md - name: Display an inventory of created resources and licenses - href: cloudknox-product-data-inventory.md + href: product-data-inventory.md - name: Manage organizational and personal information expanded: false items: - name: View personal and organization information - href: cloudknox-product-account-settings.md + href: product-account-settings.md - name: View information about identities, resources, and tasks expanded: false 
items: - name: View analytic information with the Analytics dashboard - href: cloudknox-usage-analytics-home.md + href: usage-analytics-home.md - name: View analytic information about users - href: cloudknox-usage-analytics-users.md + href: usage-analytics-users.md - name: View analytic information about groups - href: cloudknox-usage-analytics-groups.md + href: usage-analytics-groups.md - name: View analytic information about active resources - href: cloudknox-usage-analytics-active-resources.md + href: usage-analytics-active-resources.md - name: View analytic information about active tasks - href: cloudknox-usage-analytics-active-tasks.md + href: usage-analytics-active-tasks.md - name: View analytic information about access keys - href: cloudknox-usage-analytics-access-keys.md + href: usage-analytics-access-keys.md - name: View analytic information about serverless functions - href: cloudknox-usage-analytics-serverless-functions.md + href: usage-analytics-serverless-functions.md - name: Manage roles/policies and permission requests expanded: false items: - name: View roles/policies and requests for permission in the Remediation dashboard - href: cloudknox-ui-remediation.md + href: ui-remediation.md - name: View information about roles/policies - href: cloudknox-howto-view-role-policy.md + href: how-to-view-role-policy.md - name: View information about active and completed tasks - href: cloudknox-ui-tasks.md + href: ui-tasks.md - name: Create a role/policy - href: cloudknox-howto-create-role-policy.md + href: how-to-create-role-policy.md - name: Clone a role/policy - href: cloudknox-howto-clone-role-policy.md + href: how-to-clone-role-policy.md - name: Modify a role/policy - href: cloudknox-howto-modify-role-policy.md + href: how-to-modify-role-policy.md - name: Delete a role/policy - href: cloudknox-howto-delete-role-policy.md + href: how-to-delete-role-policy.md - name: Attach and detach policies for AWS identities - href: 
cloudknox-howto-attach-detach-permissions.md + href: how-to-attach-detach-permissions.md - name: Add and remove roles and tasks for Azure and GCP identities - href: cloudknox-howto-add-remove-role-task.md + href: how-to-add-remove-role-task.md - name: Revoke access to high-risk and unused tasks or assign read-only status - href: cloudknox-howto-revoke-task-readonly-status.md + href: how-to-revoke-task-readonly-status.md - name: Create or approve a request for permissions - href: cloudknox-howto-create-approve-privilege-request.md + href: how-to-create-approve-privilege-request.md - name: Manage users, roles, and their access levels expanded: false items: - name: Manage users and groups - href: cloudknox-ui-user-management.md + href: ui-user-management.md # - name: Define and manage users, roles, and access levels - # href: cloudknox-product-define-permission-levels.md + # href: product-define-permission-levels.md - name: Select group-based permissions settings - href: cloudknox-howto-create-group-based-permissions.md + href: how-to-create-group-based-permissions.md - name: Use queries to view information about user access expanded: false items: - name: Use queries to see how users access information - href: cloudknox-ui-audit-trail.md + href: ui-audit-trail.md - name: Create a custom query - href: cloudknox-howto-create-custom-queries.md + href: how-to-create-custom-queries.md - name: Generate an on-demand report from a query - href: cloudknox-howto-audit-trail-results.md + href: how-to-audit-trail-results.md - name: Filter and query user activity - href: cloudknox-product-audit-trail.md + href: product-audit-trail.md - name: Set activity alerts and triggers expanded: false items: - name: View information about activity triggers - href: cloudknox-ui-triggers.md + href: ui-triggers.md - name: Create and view activity alerts and alert triggers - href: cloudknox-howto-create-alert-trigger.md + href: how-to-create-alert-trigger.md - name: Create and view rule-based 
anomalies and anomaly triggers - href: cloudknox-product-rule-based-anomalies.md + href: product-rule-based-anomalies.md - name: Create and view statistical anomalies and anomaly triggers - href: cloudknox-product-statistical-anomalies.md + href: product-statistical-anomalies.md - name: Create and view permission analytics triggers - href: cloudknox-product-permission-analytics.md + href: product-permission-analytics.md - name: Manage rules for authorization systems expanded: false items: - name: View rules in the Autopilot dashboard - href: cloudknox-ui-autopilot.md + href: ui-autopilot.md - name: Create a rule - href: cloudknox-howto-create-rule.md + href: how-to-create-rule.md - name: Generate, view, and apply rule recommendations - href: cloudknox-howto-recommendations-rule.md + href: how-to-recommendations-rule.md - name: View notification settings for a rule - href: cloudknox-howto-notifications-rule.md + href: how-to-notifications-rule.md - name: Create and view reports expanded: false items: - name: View system reports in the Reports dashboard - href: cloudknox-product-reports.md + href: product-reports.md - name: View a list and description of system reports - href: cloudknox-all-reports.md + href: all-reports.md - name: Generate and view a system report - href: cloudknox-report-view-system-report.md + href: report-view-system-report.md - name: Create, view, and share a custom report - href: cloudknox-report-create-custom-report.md + href: report-create-custom-report.md - name: Generate and download the Permissions analytics report - href: cloudknox-product-permissions-analytics-reports.md + href: product-permissions-analytics-reports.md - name: Troubleshoot expanded: false items: - name: Troubleshoot issues - href: cloudknox-troubleshoot.md + href: troubleshoot.md - name: Training videos expanded: false items: - - name: Get started with CloudKnox training videos - href: cloudknox-training-videos.md + - name: Get started with Permissions Management 
training videos + href: training-videos.md - name: Reference expanded: false items: - name: FAQs - href: cloudknox-faqs.md + href: faqs.md - name: Glossary - href: cloudknox-multi-cloud-glossary.md + href: multi-cloud-glossary.md diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-all-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/all-reports.md similarity index 85% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-all-reports.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/all-reports.md index 716f9029be38..ac4b7ff73a51 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-all-reports.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/all-reports.md @@ -1,6 +1,6 @@ --- -title: View a list and description of all system reports available in CloudKnox Permissions Management reports -description: View a list and description of all system reports available in CloudKnox Permissions Management. +title: View a list and description of all system reports available in Permissions Management reports +description: View a list and description of all system reports available in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,20 +15,20 @@ ms.author: kenwith # View a list and description of system reports > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some of the information relates to a prerelease product that may be substantially modified before it's released. 
Microsoft makes no warranties, express or implied, with respect to the information provided here. -CloudKnox Permissions Management (CloudKnox) has various types of system reports that capture specific sets of data. These reports allow management, auditors, and administrators to: +Permissions Management has various types of system reports that capture specific sets of data. These reports allow management, auditors, and administrators to: - Make timely decisions. - Analyze trends and system/user performance. - Identify trends in data and high risk areas so that management can address issues more quickly and improve their efficiency. -This article provides you with a list and description of the system reports available in CloudKnox. Depending on the report, you can download it in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. +This article provides you with a list and description of the system reports available in Permissions Management. Depending on the report, you can download it in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. ## Download a system report -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Systems Reports** subtab. +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Systems Reports** subtab. 1. In the **Report Name** column, find the report you want, and then select the down arrow to the right of the report name to download the report. Or, from the ellipses **(...)** menu, select **Download**. @@ -48,14 +48,14 @@ This article provides you with a list and description of the system reports avai | PCI DSS | Detailed

Summary

Dashboard | CSV | **Dashboard**: This report helps track the overall progress of the PCI-DSS benchmark. It lists the percentage passing, overall pass or fail of test control along with the breakup of L1/L2 per Auth system.

**Summary**: For each authorized system, this report lists the test control pass or fail per authorized system and the number of resources evaluated for each test control.

**Detailed**: This report helps auditors and administrators to track the resource level pass or fail per test control. | AWS, Azure, or GCP | Yes | | PCI History | Summary | CSV | This report helps track **Monthly PCI History** for each authorized system. It can be used to plot the trend of the PCI. | AWS, Azure, or GCP | Yes | | Permissions Analytics Report (PAR) | Summary | PDF | This report helps monitor the **Identity Privilege** related activity across the authorized systems. It captures any Identity permission change.

This report has the following main sections: **User Summary**, **Group Summary**, **Role Summary & Delete Task Summary**.

The **User Summary** lists the current granted permissions along with high-risk permissions and resources accessed in 1-day, 7-day, or 30-days durations. There are subsections for newly added or deleted users, users with PCI change, high-risk active/inactive users.

The **Group Summary** lists the administrator level groups with the current granted permissions along with high-risk permissions and resources accessed in 1-day, 7-day, or 30-day durations. There are subsections for newly added or deleted groups, groups with PCI change, High-risk active/inactive groups.

The **Role Summary** and the **Group Summary** list similar details.

The **Delete Task** summary section lists the number of times the **Delete Task** has been executed in the given period. | AWS, Azure, or GCP | No | -| Permissions Analytics Report (PAR) | Detailed | CSV | This report lists the different key findings in the selected authorized systems. The key findings include **Super identities**, **Inactive identities**, **Over-provisioned active identities**, **Storage bucket hygiene**, **Access key age (AWS)**, and so on.

This report helps administrators to visualize the findings across the organization and make decisions. | AWS, Azure, or GCP | Yes | +| Permissions Analytics Report (PAR) | Detailed | CSV | This report lists the different key findings in the selected authorized systems. The key findings include **Super identities**, **Inactive identities**, **Over-provisioned active identities**, **Storage bucket hygiene**, **Access key age (AWS)**, and so on.

This report helps administrators to visualize the findings across the organization and make decisions. | AWS, Azure, or GCP | Yes | | Role/Policy Details | Summary | CSV | This report captures **Assigned/Unassigned** and **Custom/system policy with used/unused condition** for specific or all AWS accounts.

Similar data can be captured for Azure and GCP for assigned and unassigned roles. | AWS, Azure, or GCP | No | | User Entitlements and Usage | Detailed

Summary | CSV | This report provides a summary and details of **User entitlements and usage**.

**Data displayed on Usage Analytics** screen is downloaded as part of the **Summary** report.

**Detailed permissions usage per User** is listed in the Detailed report. | AWS, Azure, or GCP | Yes | ## Next steps -- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](cloudknox-product-reports.md). -- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](cloudknox-report-view-system-report.md). -- For information about how to create and view a custom report, see [Generate and view a custom report](cloudknox-report-create-custom-report.md). -- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). +- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](product-reports.md). +- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](report-view-system-report.md). +- For information about how to create and view a custom report, see [Generate and view a custom report](report-create-custom-report.md). +- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-tenant.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-tenant.md deleted file mode 100644 index d8c80ae7996f..000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-tenant.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Enable CloudKnox Permissions Management in your organization -description: How to enable CloudKnox Permissions Management in your organization. 
-services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 04/20/2022 -ms.author: kenwith ---- - -# Enable CloudKnox in your organization - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - - -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). - - - -This article describes how to enable CloudKnox Permissions Management (CloudKnox) in your organization. Once you've enabled CloudKnox, you can connect it to your Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) platforms. - -> [!NOTE] -> To complete this task, you must have *global administrator* permissions as a user in that tenant. You can't enable CloudKnox as a user from other tenant who has signed in via B2B or via Azure Lighthouse. - -## Prerequisites - -To enable CloudKnox in your organization: - -- You must have an Azure AD tenant. If you don't already have one, [create a free account](https://azure.microsoft.com/free/). -- You must be eligible for or have an active assignment to the global administrator role as a user in that tenant. - -> [!NOTE] -> During public preview, CloudKnox doesn't perform a license check. - -## View a training video on enabling CloudKnox - -- To view a video on how to enable CloudKnox in your Azure AD tenant, select [Enable CloudKnox in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). -- To view a video on how to configure and onboard AWS accounts in CloudKnox, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). 
-- To view a video on how to configure and onboard GCP accounts in CloudKnox, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). - - -## How to enable CloudKnox on your Azure AD tenant - -1. In your browser: - 1. Go to [Azure services](https://portal.azure.com) and use your credentials to sign in to [Azure Active Directory](https://ms.portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview). - 1. If you aren't already authenticated, sign in as a global administrator user. - 1. If needed, activate the global administrator role in your Azure AD tenant. - 1. In the Azure AD portal, select **Features highlights**, and then select **CloudKnox Permissions Management**. - - 1. If you're prompted to select a sign in account, sign in as a global administrator for a specified tenant. - - The **Welcome to CloudKnox Permissions Management** screen appears, displaying information on how to enable CloudKnox on your tenant. - -1. To provide access to the CloudKnox application, create a service principal. - - An Azure service principal is a security identity used by user-created apps, services, and automation tools to access specific Azure resources. - - > [!NOTE] - > To complete this step, you must have Azure CLI or Azure PowerShell on your system, or an Azure subscription where you can run Cloud Shell. - - - To create a service principal that points to the CloudKnox application via Cloud Shell: - - 1. Copy the script on the **Welcome** screen: - - `az ad sp create --id b46c3ac5-9da6-418f-a849-0a07a10b3c6c` - - 1. If you have an Azure subscription, return to the Azure AD portal and select **Cloud Shell** on the navigation bar. - If you don't have an Azure subscription, open a command prompt on a Windows Server. - 1. If you have an Azure subscription, paste the script into Cloud Shell and press **Enter**. 
- - - For information on how to create a service principal through the Azure portal, see [Create an Azure service principal with the Azure CLI](/cli/azure/create-an-azure-service-principal-azure-cli). - - - For information on the **az** command and how to sign in with the no subscriptions flag, see [az login](/cli/azure/reference-index?view=azure-cli-latest#az-login&preserve-view=true). - - - For information on how to create a service principal via Azure PowerShell, see [Create an Azure service principal with Azure PowerShell](/powershell/azure/create-azure-service-principal-azureps?view=azps-7.1.0&preserve-view=true). - - 1. After the script runs successfully, the service principal attributes for CloudKnox display. Confirm the attributes. - - The **Cloud Infrastructure Entitlement Management** application displays in the Azure AD portal under **Enterprise applications**. - -1. Return to the **Welcome to CloudKnox** screen and select **Enable CloudKnox Permissions Management**. - - You have now completed enabling CloudKnox on your tenant. CloudKnox launches with the **Data Collectors** dashboard. - -## Configure data collection settings - -Use the **Data Collectors** dashboard in CloudKnox to configure data collection settings for your authorization system. - -1. If the **Data Collectors** dashboard isn't displayed when CloudKnox launches: - - - In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. - -1. Select the authorization system you want: **AWS**, **Azure**, or **GCP**. - -1. 
For information on how to onboard an AWS account, Azure subscription, or GCP project into CloudKnox, select one of the following articles and follow the instructions: - - - [Onboard an AWS account](cloudknox-onboard-aws.md) - - [Onboard an Azure subscription](cloudknox-onboard-azure.md) - - [Onboard a GCP project](cloudknox-onboard-gcp.md) - -## Next steps - -- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md) -- For a list of frequently asked questions (FAQs) about CloudKnox, see [FAQs](cloudknox-faqs.md). -- For information on how to start viewing information about your authorization system in CloudKnox, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-reports.md deleted file mode 100644 index be79c6b0acb4..000000000000 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-reports.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: View system reports in the Reports dashboard in CloudKnox Permissions Management -description: How to view system reports in the Reports dashboard in CloudKnox Permissions Management. -services: active-directory -author: kenwith -manager: rkarlin -ms.service: active-directory -ms.subservice: ciem -ms.workload: identity -ms.topic: how-to -ms.date: 02/23/2022 -ms.author: kenwith ---- - -# View system reports in the Reports dashboard - -> [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. -> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
- -CloudKnox Permissions Management (CloudKnox) has various types of system report types available that capture specific sets of data. These reports allow management to: - -- Make timely decisions. -- Analyze trends and system/user performance. -- Identify trends in data and high risk areas so that management can address issues more quickly and improve their efficiency. - -## Explore the Reports dashboard - -The **Reports** dashboard provides a table of information with both system reports and custom reports. The **Reports** dashboard defaults to the **System Reports** tab, which has the following details: - -- **Report Name**: The name of the report. -- **Category**: The type of report. For example, **Permission**. -- **Authorization Systems**: Displays which authorizations the custom report applies to. -- **Format**: Displays the output format the report can be generated in. For example, comma-separated values (CSV) format, portable document format (PDF), or Microsoft Excel Open XML Spreadsheet (XLSX) format. - - - To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. - - The following message displays across the top of the screen in green if the download is successful: **Successfully Started To Generate On Demand Report**. - -## Available system reports - -CloudKnox offers the following reports for management associated with the authorization systems noted in parenthesis: - -- **Access Key Entitlements And Usage**: - - **Summary of report**: Provides information about access key, for example, permissions, usage, and rotation date. - - **Applies to**: Amazon Web Services (AWS) and Microsoft Azure - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Summary** or **Detailed** - - **Use cases**: - - The access key age, last rotation date, and last usage date is available in the summary report to help with key rotation. 
- - The granted task and Permissions creep index (PCI) score to take action on the keys. - -- **User Entitlements And Usage**: - - **Summary of report**: Provides information about the identities' permissions, for example, entitlement, usage, and PCI. - - **Applies to**: AWS, Azure, and Google Cloud Platform (GCP) - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Summary** or **Detailed** - - **Use cases**: - - The data displayed on the **Usage Analytics** screen is downloaded as part of the **Summary** report. The user's detailed permissions usage is listed in the **Detailed** report. - -- **Group Entitlements And Usage**: - - **Summary of report**: Provides information about the group's permissions, for example, entitlement, usage, and PCI. - - **Applies to**: AWS, Azure, and GCP - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Summary** - - **Use cases**: - - All group level entitlements and permission assignments, PCIs, and the number of members are listed as part of this report. - -- **Identity Permissions**: - - **Summary of report**: Report on identities that have specific permissions, for example, identities that have permission to delete any S3 buckets. - - **Applies to**: AWS, Azure, and GCP - - **Report output type**: CSV - - **Ability to collate report**: No - - **Type of report**: **Summary** - - **Use cases**: - - Any task usage or specific task usage via User/Group/Role/App can be tracked with this report. - -- **Identity privilege activity report** - - **Summary of report**: Provides information about permission changes that have occurred in the selected duration. - - **Applies to**: AWS, Azure, and GCP - - **Report output type**: PDF - - **Ability to collate report**: No - - **Type of report**: **Summary** - - **Use cases**: - - Any identity permission change can be captured using this report. 
- - The **Identity Privilege Activity** report has the following main sections: **User Summary**, **Group Summary**, **Role Summary**, and **Delete Task Summary**. - - The **User** summary lists the current granted permissions and high-risk permissions and resources accessed in 1 day, 7 days, or 30 days. There are subsections for newly added or deleted users, users with PCI change, and High-risk active/inactive users. - - The **Group** summary lists the administrator level groups with the current granted permissions and high-risk permissions and resources accessed in 1 day, 7 days, or 30 days. There are subsections for newly added or deleted groups, groups with PCI change, and High-risk active/inactive groups. - - The **Role summary** lists similar details as **Group Summary**. - - The **Delete Task summary** section lists the number of times the **Delete task** has been executed in the given time period. - -- **Permissions Analytics Report** - - **Summary of report**: Provides information about the violation of key security best practices. - - **Applies to**: AWS, Azure, and GCP - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Detailed** - - **Use cases**: - - This report lists the different key findings in the selected auth systems. The key findings include super identities, inactive identities, over provisioned active identities, storage bucket hygiene, and access key age (for AWS only). The report helps administrators to visualize the findings across the organization. - - For more information about this report, see [Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). - -- **Role/Policy Details** - - **Summary of report**: Provides information about roles and policies. 
- - **Applies to**: AWS, Azure, GCP - - **Report output type**: CSV - - **Ability to collate report**: No - - **Type of report**: **Summary** - - **Use cases**: - - Assigned/Unassigned, custom/system policy, and the used/unused condition is captured in this report for any specific, or all, AWS accounts. Similar data can be captured for Azure/GCP for the assigned/unassigned roles. - -- **PCI History** - - **Summary of report**: Provides a report of privilege creep index (PCI) history. - - **Applies to**: AWS, Azure, GCP - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Summary** - - **Use cases**: - - This report plots the trend of the PCI by displaying the monthly PCI history for each authorization system. - -- **All Permissions for Identity** - - **Summary of report**: Provides results of all permissions for identities. - - **Applies to**: AWS, Azure, GCP - - **Report output type**: CSV - - **Ability to collate report**: Yes - - **Type of report**: **Detailed** - - **Use cases**: - - This report lists all the assigned permissions for the selected identities. - - - - -## Next steps - -- For a detailed overview of available system reports, see [View a list and description of system reports](cloudknox-all-reports.md). -- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](cloudknox-report-view-system-report.md). -- For information about how to create and view a custom report, see [Generate and view a custom report](cloudknox-report-create-custom-report.md). -- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-faqs.md b/articles/active-directory/cloud-infrastructure-entitlement-management/faqs.md similarity index 84% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-faqs.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/faqs.md index b06e14cd767e..26d8ebef64e7 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-faqs.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/faqs.md @@ -1,5 +1,5 @@ --- -title: Frequently asked questions (FAQs) about CloudKnox Permissions Management +title: Frequently asked questions (FAQs) about CloudKnox Permissions Management description: Frequently asked questions (FAQs) about CloudKnox Permissions Management. services: active-directory author: kenwith @@ -18,7 +18,7 @@ ms.author: kenwith > CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -> [!NOTE] +> [!NOTE] > The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). @@ -26,26 +26,26 @@ This article answers frequently asked questions (FAQs) about CloudKnox Permissio ## What's CloudKnox Permissions Management? -CloudKnox is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities. For example, over-privileged workload and user identities, actions, and resources across multi-cloud infrastructures in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). CloudKnox detects, automatically right-sizes, and continuously monitors unused and excessive permissions. 
It deepens the Zero Trust security strategy by augmenting the least privilege access principle. +CloudKnox is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities. For example, over-privileged workload and user identities, actions, and resources across multi-cloud infrastructures in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). CloudKnox detects, automatically right-sizes, and continuously monitors unused and excessive permissions. It deepens the Zero Trust security strategy by augmenting the least privilege access principle. ## What are the prerequisites to use CloudKnox? CloudKnox supports data collection from AWS, GCP, and/or Microsoft Azure. For data collection and analysis, customers are required to have an Azure Active Directory (Azure AD) account to use CloudKnox. -## Can a customer use CloudKnox if they have other identities with access to their IaaS platform that aren’t yet in Azure AD (for example, if part of their business has Okta or AWS Identity & Access Management (IAM))? +## Can a customer use CloudKnox if they have other identities with access to their IaaS platform that aren't yet in Azure AD (for example, if part of their business has Okta or AWS Identity & Access Management (IAM))? -Yes, a customer can detect, mitigate, and monitor the risk of ‘backdoor’ accounts that are local to AWS IAM, GCP, or from other identity providers such as Okta or AWS IAM. +Yes, a customer can detect, mitigate, and monitor the risk of 'backdoor' accounts that are local to AWS IAM, GCP, or from other identity providers such as Okta or AWS IAM. ## Where can customers access CloudKnox? -Customers can access the CloudKnox interface with a link from the Azure AD extension in the Azure portal. +Customers can access the CloudKnox interface with a link from the Azure AD extension in the Azure portal. ## Can non-cloud customers use CloudKnox on-premises? 
-No, CloudKnox is a hosted cloud offering. +No, CloudKnox is a hosted cloud offering. -## Can non-Azure customers use CloudKnox? +## Can non-Azure customers use CloudKnox? Yes, non-Azure customers can use our solution. CloudKnox is a multi-cloud solution so even customers who have no subscription to Azure can benefit from it. @@ -53,21 +53,21 @@ Yes, non-Azure customers can use our solution. CloudKnox is a multi-cloud soluti No, the CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). -## If I’m already using Azure AD Privileged Identity Management (PIM) for Azure, what value does CloudKnox provide? +## If I'm already using Azure AD Privileged Identity Management (PIM) for Azure, what value does CloudKnox provide? -CloudKnox complements Azure AD PIM. Azure AD PIM provides just-in-time access for admin roles in Azure (as well as Microsoft Online Services and apps that use groups), while CloudKnox allows multi-cloud discovery, remediation, and monitoring of privileged access across Azure, AWS, and GCP. +CloudKnox complements Azure AD PIM. Azure AD PIM provides just-in-time access for admin roles in Azure (as well as Microsoft Online Services and apps that use groups), while CloudKnox allows multi-cloud discovery, remediation, and monitoring of privileged access across Azure, AWS, and GCP. ## What languages does CloudKnox support? -CloudKnox currently supports English. +CloudKnox currently supports English. ## What public cloud infrastructures are supported by CloudKnox? -CloudKnox currently supports the three major public clouds: Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure. +CloudKnox currently supports the three major public clouds: Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure. ## Does CloudKnox support hybrid environments? -CloudKnox currently doesn’t support hybrid environments. 
+CloudKnox currently doesn't support hybrid environments. ## What types of identities are supported by CloudKnox? @@ -79,11 +79,11 @@ CloudKnox is currently not GDPR compliant.---> ## Is CloudKnox available in Government Cloud? -No, CloudKnox is currently not available in Government clouds. +No, CloudKnox is currently not available in Government clouds. ## Is CloudKnox available for sovereign clouds? -No, CloudKnox is currently not available in sovereign Clouds. +No, CloudKnox is currently not available in sovereign Clouds. ## How does CloudKnox collect insights about permissions usage? @@ -95,7 +95,7 @@ CloudKnox offers granular visibility into all identities and their permissions g ## What is the Permissions Creep Index? -The Permissions Creep Index (PCI) is a quantitative measure of risk associated with an identity or role determined by comparing permissions granted versus permissions exercised. It allows users to instantly evaluate the level of risk associated with the number of unused or over-provisioned permissions across identities and resources. It measures how much damage identities can cause based on the permissions they have. +The Permissions Creep Index (PCI) is a quantitative measure of risk associated with an identity or role determined by comparing permissions granted versus permissions exercised. It allows users to instantly evaluate the level of risk associated with the number of unused or over-provisioned permissions across identities and resources. It measures how much damage identities can cause based on the permissions they have. ## How can customers use CloudKnox to delete unused or excessive permissions? @@ -107,11 +107,11 @@ For any break-glass or one-off scenarios where an identity needs to perform a sp ## What is the difference between permissions on-demand and just-in-time access? 
-Just-in-time (JIT) access is a method used to enforce the principle of least privilege to ensure identities are given the minimum level of permissions to perform the task at hand. Permissions on-demand are a type of JIT access that allows the temporary elevation of permissions, enabling identities to access resources on a by-request, timed basis. +Just-in-time (JIT) access is a method used to enforce the principle of least privilege to ensure identities are given the minimum level of permissions to perform the task at hand. Permissions on-demand are a type of JIT access that allows the temporary elevation of permissions, enabling identities to access resources on a by-request, timed basis. ## How can customers monitor permissions usage with CloudKnox? -Customers only need to track the evolution of their Permission Creep Index to monitor permissions usage. They can do this in the “Analytics” tab in their CloudKnox dashboard where they can see how the PCI of each identity or resource is evolving over time. +Customers only need to track the evolution of their Permission Creep Index to monitor permissions usage. They can do this in the "Analytics" tab in their CloudKnox dashboard where they can see how the PCI of each identity or resource is evolving over time. ## Can customers generate permissions usage reports? @@ -120,7 +120,7 @@ Yes, CloudKnox has various types of system report available that capture specifi - Analyze usage trends and system/user performance. - Identify high-risk areas. -For information about permissions usage reports, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). +For information about permissions usage reports, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md). ## Does CloudKnox integrate with third-party ITSM (Information Technology Security Management) tools? 
@@ -141,9 +141,9 @@ Once fully onboarded with data collection set up, customers can access permissio ## Is CloudKnox collecting and storing sensitive personal data? -No, CloudKnox doesn’t have access to sensitive personal data. +No, CloudKnox doesn't have access to sensitive personal data. -## Where can I find more information about CloudKnox? +## Where can I find more information about CloudKnox? You can read our blog and visit our web page. You can also get in touch with your Microsoft point of contact to schedule a demo. @@ -156,5 +156,5 @@ You can read our blog and visit our web page. You can also get in touch with you ## Next steps -- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md). -- For information on how to onboard CloudKnox in your organization, see [Enable CloudKnox in your organization](cloudknox-onboard-enable-tenant.md). +- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](overview.md). +- For information on how to onboard CloudKnox in your organization, see [Enable CloudKnox in your organization](onboard-enable-tenant.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-add-remove-role-task.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-add-remove-role-task.md similarity index 76% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-add-remove-role-task.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-add-remove-role-task.md index c9f6dd44a309..d07250a8bd6e 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-add-remove-role-task.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-add-remove-role-task.md @@ -1,6 +1,6 @@ --- -title: Add and remove roles and tasks for groups, users, and service accounts for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in CloudKnox Permissions Management -description: How to attach and detach permissions for groups, users, and service accounts for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in CloudKnox Permissions Management. +title: Add and remove roles and tasks for groups, users, and service accounts for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in Permissions Management +description: How to attach and detach permissions for groups, users, and service accounts for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in Permissions Management. 
services: active-directory author: kenwith manager: rkarlin @@ -12,27 +12,27 @@ ms.date: 02/23/2022 ms.author: kenwith --- -# Add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities +# Add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management (Entra) is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. This article describes how you can add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities using the **Remediation** dashboard. > [!NOTE] -> To view the **Remediation** tab, your must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation** tab, your must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. ## View permissions -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Entra home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search For** dropdown, select **Group**, **User**, or **APP**. 1. 
To search for more parameters, you can make a selection from the **User States**, **Permission Creep Index**, and **Task Usage** dropdowns. 1. Select **Apply**. - CloudKnox displays a list of groups, users, and service accounts that match your criteria. + Entra displays a list of groups, users, and service accounts that match your criteria. 1. In **Enter a username**, enter or select a user. 1. In **Enter a Group Name**, enter or select a group, then select **Apply**. 1. Make a selection from the results list. @@ -42,64 +42,64 @@ This article describes how you can add and remove roles and tasks for Microsoft ## Add a role -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Entra home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. 1. Make a selection from the results list. 1. To attach a role, select **Add role**. -1. In the **Add Role** page, from the **Available Roles** list, select the plus sign **(+)** to move the role to the **Selected Roles** list. +1. In the **Add Role** page, from the **Available Roles** list, select the plus sign **(+)** to move the role to the **Selected Roles** list. 1. When you have finished adding roles, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - **Execute** to change the permission. - **Close** to cancel the action. ## Remove a role -1. 
On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Entra home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. 1. Make a selection from the results list. 1. To remove a role, select **Remove Role**. -1. In the **Remove Role** page, from the **Available Roles** list, select the plus sign **(+)** to move the role to the **Selected Roles** list. +1. In the **Remove Role** page, from the **Available Roles** list, select the plus sign **(+)** to move the role to the **Selected Roles** list. 1. When you have finished selecting roles, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - **Execute** to change the permission. - **Close** to cancel the action. ## Add a task -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. 1. Make a selection from the results list. 1. To attach a role, select **Add Tasks**. -1. 
In the **Add Tasks** page, from the **Available Tasks** list, select the plus sign **(+)** to move the task to the **Selected Tasks** list. +1. In the **Add Tasks** page, from the **Available Tasks** list, select the plus sign **(+)** to move the task to the **Selected Tasks** list. 1. When you have finished adding tasks, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - **Execute** to change the permission. - **Close** to cancel the action. ## Remove a task -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Entra home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. 1. Make a selection from the results list. 1. To remove a task, select **Remove Tasks**. -1. In the **Remove Tasks** page, from the **Available Tasks** list, select the plus sign **(+)** to move the task to the **Selected Tasks** list. +1. In the **Remove Tasks** page, from the **Available Tasks** list, select the plus sign **(+)** to move the task to the **Selected Tasks** list. 1. When you have finished selecting tasks, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. 
- **Execute** to change the permission. - **Close** to cancel the action. @@ -107,12 +107,12 @@ This article describes how you can add and remove roles and tasks for Microsoft ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for Amazon Web Services (AWS) identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). 
+- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for Amazon Web Services (AWS) identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-attach-detach-permissions.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-attach-detach-permissions.md similarity index 74% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-attach-detach-permissions.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-attach-detach-permissions.md index 6054e4c1c99c..fc27f2074090 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-attach-detach-permissions.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-attach-detach-permissions.md @@ -1,6 +1,6 @@ --- -title: Attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities in the Remediation dashboard in CloudKnox Permissions Management -description: How to attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities in the Remediation dashboard in CloudKnox Permissions Management. +title: Attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities in the Remediation dashboard in Permissions Management +description: How to attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities in the Remediation dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -12,27 +12,27 @@ ms.date: 02/23/2022 ms.author: kenwith --- -# Attach and detach policies for Amazon Web Services (AWS) identities +# Attach and detach policies for Amazon Web Services (AWS) identities > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. 
> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. This article describes how you can attach and detach permissions for users, roles, and groups for Amazon Web Services (AWS) identities using the **Remediation** dashboard. > [!NOTE] -> To view the **Remediation** tab, your must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation** tab, your must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. ## View permissions -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **AWS**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search For** dropdown, select **Group**, **User**, or **Role**. 1. To search for more parameters, you can make a selection from the **User States**, **Permission Creep Index**, and **Task Usage** dropdowns. 1. Select **Apply**. - CloudKnox displays a list of users, roles, or groups that match your criteria. + Permissions Management displays a list of users, roles, or groups that match your criteria. 1. In **Enter a username**, enter or select a user. 1. In **Enter a group name**, enter or select a group, then select **Apply**. 1. Make a selection from the results list. 
@@ -42,30 +42,30 @@ This article describes how you can attach and detach permissions for users, role ## Attach policies -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **AWS**. 1. In **Enter a username**, enter or select a user. 1. In **Enter a Group Name**, enter or select a group, then select **Apply**. 1. Make a selection from the results list. 1. To attach a policy, select **Attach Policies**. -1. In the **Attach Policies** page, from the **Available policies** list, select the plus sign **(+)** to move the policy to the **Selected policies** list. +1. In the **Attach Policies** page, from the **Available policies** list, select the plus sign **(+)** to move the policy to the **Selected policies** list. 1. When you have finished adding policies, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - **Execute** to change the permission. - **Close** to cancel the action. ## Detach policies -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **AWS**. 1. In **Enter a username**, enter or select a user. 1. In **Enter a Group Name**, enter or select a group, then select **Apply**. 1. Make a selection from the results list. 1. To remove a policy, select **Detach Policies**. -1. 
In the **Detach Policies** page, from the **Available policies** list, select the plus sign **(+)** to move the policy to the **Selected policies** list. +1. In the **Detach Policies** page, from the **Available policies** list, select the plus sign **(+)** to move the policy to the **Selected policies** list. 1. When you have finished selecting policies, select **Submit**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - **Execute** to change the permission. - **Close** to cancel the action. @@ -73,12 +73,11 @@ This article describes how you can attach and detach permissions for users, role ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). 
-- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). - +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-audit-trail-results.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-audit-trail-results.md similarity index 66% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-audit-trail-results.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-audit-trail-results.md index 8b383ad66a58..2f94f20e9795 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-audit-trail-results.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-audit-trail-results.md @@ -1,6 +1,6 @@ --- -title: Generate an on-demand report from a query in the Audit dashboard in CloudKnox Permissions Management -description: How to generate an on-demand report from a query in the **Audit** dashboard in CloudKnox Permissions Management. +title: Generate an on-demand report from a query in the Audit dashboard in Permissions Management +description: How to generate an on-demand report from a query in the **Audit** dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,10 +15,10 @@ ms.author: kenwith # Generate an on-demand report from a query > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how you can generate an on-demand report from a query in the **Audit** dashboard in CloudKnox Permissions Management (CloudKnox). 
You can: +This article describes how you can generate an on-demand report from a query in the **Audit** dashboard in Permissions Management. You can: - Run a report on-demand. - Schedule and run a report as often as you want. @@ -26,13 +26,13 @@ This article describes how you can generate an on-demand report from a query in ## Generate a custom report on-demand -1. In the CloudKnox home page, select the **Audit** tab. +1. In the Permissions Management home page, select the **Audit** tab. - CloudKnox displays the query options available to you. + Permissions Management displays the query options available to you. 1. In the **Audit** dashboard, select **Search** to run the query. 1. Select **Export**. - CloudKnox generates the report and exports it in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. + Permissions Management generates the report and exports it in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. ## Next steps -- For information on how to view how users access information, see [Use queries to see how users access information](cloudknox-ui-audit-trail.md). -- For information on how to filter and view user activity, see [Filter and query user activity](cloudknox-product-audit-trail.md). -- For information on how to create a query,see [Create a custom query](cloudknox-howto-create-custom-queries.md). +- For information on how to view how users access information, see [Use queries to see how users access information](ui-audit-trail.md). +- For information on how to filter and view user activity, see [Filter and query user activity](product-audit-trail.md). +- For information on how to create a query, see [Create a custom query](how-to-create-custom-queries.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-clone-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-clone-role-policy.md similarity index 63% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-clone-role-policy.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-clone-role-policy.md index b922cd5fc904..9ae6da95198f 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-clone-role-policy.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-clone-role-policy.md @@ -1,5 +1,5 @@ --- -title: Clone a role/policy in the Remediation dashboard in CloudKnox Permissions Management +title: Clone a role/policy in the Remediation dashboard in Permissions Management description: How to clone a role/policy in the Just Enough Permissions (JEP) Controller. services: active-directory author: kenwith @@ -15,28 +15,28 @@ ms.author: kenwith # Clone a role/policy in the Remediation dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how you can use the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) to clone roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. +This article describes how you can use the **Remediation** dashboard in Permissions Management to clone roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. 
> [!NOTE] -> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. > [!NOTE] -> Microsoft Azure uses the term *role* for what other Cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. +> Microsoft Azure uses the term *role* for what other Cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. ## Clone a role/policy -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** tab. 1. Select the role/policy you want to clone, and from the **Actions** column, select **Clone**. -1. **(AWS Only)** In the **Clone** box, the **Clone Resources** and **Clone Conditions** checkboxes are automatically selected. +1. **(AWS Only)** In the **Clone** box, the **Clone Resources** and **Clone Conditions** checkboxes are automatically selected. Deselect the boxes if the resources and conditions are different from what is displayed. 1. Enter a name for each authorization system that was selected in the **Policy Name** boxes, and then select **Next**. 1. 
If the data collector hasn't been given controller privileges, the following message displays: **Only online/controller-enabled authorization systems can be submitted for cloning.** - To clone this role manually, download the script and JSON file. + To clone this role manually, download the script and JSON file. 1. Select **Submit**. 1. Refresh the **Role/Policies** tab to see the role/policy you cloned. @@ -44,12 +44,12 @@ This article describes how you can use the **Remediation** dashboard in CloudKno ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). 
-- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). 
+- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-alert-trigger.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-alert-trigger.md similarity index 82% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-alert-trigger.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-alert-trigger.md index fb9489154277..aa7340f908e9 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-alert-trigger.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-alert-trigger.md @@ -1,6 +1,6 @@ --- -title: Create and view activity alerts and alert triggers in CloudKnox Permissions Management -description: How to create and view activity alerts and alert triggers in CloudKnox Permissions Management. +title: Create and view activity alerts and alert triggers in Permissions Management +description: How to create and view activity alerts and alert triggers in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,14 +15,14 @@ ms.author: kenwith # Create and view activity alerts and alert triggers > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how you can create and view activity alerts and alert triggers in CloudKnox Permissions Management (CloudKnox). 
+This article describes how you can create and view activity alerts and alert triggers in Permissions Management. ## Create an activity alert trigger -1. In the CloudKnox home page, select **Activity Triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity Triggers** (the bell icon). 1. In the **Activity** tab, select **Create Activity Trigger**. 1. In the **Alert Name** box, enter a name for your alert. 1. In **Authorization System Type**, select your authorization system: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). @@ -31,7 +31,7 @@ This article describes how you can create and view activity alerts and alert tri 1. From the **Operator** dropdown, select an option: - **Is**/**Is Not**: Select in the value field to view a list of all available values. You can either select or enter the required value. - - **Contains**/**Not Contains**: Enter any text that the query parameter should or shouldn't contain, for example *CloudKnox*. + - **Contains**/**Not Contains**: Enter any text that the query parameter should or shouldn't contain, for example *Permissions Management*. - **In**/**Not In**: Select in the value field to view list of all available values. Select the required multiple values. 1. To add another parameter, select the plus sign **(+)**, then select an operator, and then enter a value. @@ -46,7 +46,7 @@ This article describes how you can create and view activity alerts and alert tri ## View an activity alert -1. In the CloudKnox home page, select **Activity Triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity Triggers** (the bell icon). 1. In the **Activity** tab, select the **Alerts** subtab. 1. From the **Alert Name** dropdown, select an alert. 1. From the **Date** dropdown, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range**. 
@@ -60,14 +60,14 @@ This article describes how you can create and view activity alerts and alert tri ## View activity alert triggers -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. In the **Activity** tab, select the **Alert Triggers** subtab. 1. From the **Status** dropdown, select **All**, **Activated** or **Deactivated**, then select **Apply**. The **Triggers** table displays the following information: - **Alerts**: The name of the alert trigger. - - **# of users subscribed**: The number of users who have subscribed to a specific alert trigger. + - **# of users subscribed**: The number of users who have subscribed to a specific alert trigger. - Select a number in this column to view information about the user. @@ -79,13 +79,13 @@ This article describes how you can create and view activity alerts and alert tri - If the column displays **Off**, the current user isn't subscribed to that alert. Switch the toggle to **On** to subscribe to the alert. - The user who creates an alert trigger is automatically subscribed to the alert, and will receive emails about the alert. -1. To see only activated or only deactivated triggers, from the **Status** dropdown, select **Activated** or **Deactivated**, and then select **Apply**. +1. To see only activated or only deactivated triggers, from the **Status** dropdown, select **Activated** or **Deactivated**, and then select **Apply**. 1. To view other options available to you, select the ellipses (**...**), and then select from the available options. If the **Subscription** is **On**, the following options are available: - - **Edit**: Enables you to modify alert parameters + - **Edit**: Enables you to modify alert parameters > [!NOTE] > Only the user who created the alert can perform the following actions: edit the trigger screen, rename an alert, deactivate an alert, and delete an alert. 
Changes made by other users aren't saved. @@ -94,12 +94,12 @@ This article describes how you can create and view activity alerts and alert tri - **Rename**: Enter the new name of the query, and then select **Save.** - **Deactivate**: The alert will still be listed, but will no longer send emails to subscribed users. - **Activate**: Activate the alert trigger and start sending emails to subscribed users. - - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger and their **User Status**. + - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger and their **User Status**. - **Delete**: Delete the alert. If the **Subscription** is **Off**, the following options are available: - **View**: View details of the alert trigger. - - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger and their **User Status**. + - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger and their **User Status**. - **Duplicate**: Create a duplicate copy of the selected alert trigger. @@ -107,7 +107,7 @@ This article describes how you can create and view activity alerts and alert tri ## Next steps -- For an overview on activity triggers, see [View information about activity triggers](cloudknox-ui-triggers.md). -- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](cloudknox-product-rule-based-anomalies.md). -- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](cloudknox-product-statistical-anomalies.md). -- For information on permission analytics triggers, see [Create and view permission analytics triggers](cloudknox-product-permission-analytics.md). +- For an overview on activity triggers, see [View information about activity triggers](ui-triggers.md). 
+- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](product-rule-based-anomalies.md). +- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](product-statistical-anomalies.md). +- For information on permission analytics triggers, see [Create and view permission analytics triggers](product-permission-analytics.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-approve-privilege-request.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-approve-privilege-request.md similarity index 73% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-approve-privilege-request.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-approve-privilege-request.md index 9cbe190dbef3..9b71b530ad17 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-approve-privilege-request.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-approve-privilege-request.md @@ -1,5 +1,5 @@ --- -title: Create or approve a request for permissions in the Remediation dashboard in CloudKnox Permissions Management +title: Create or approve a request for permissions in the Remediation dashboard in Permissions Management description: How to create or approve a request for permissions in the Remediation dashboard. services: active-directory author: kenwith @@ -15,49 +15,49 @@ ms.author: kenwith # Create or approve a request for permissions > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. 
> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how to create or approve a request for permissions in the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox). You can create and approve requests for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. +This article describes how to create or approve a request for permissions in the **Remediation** dashboard in Permissions Management. You can create and approve requests for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. The **Remediation** dashboard has two privilege-on-demand (POD) workflows you can use: - **New Request**: The workflow used by a user to create a request for permissions for a specified duration. -- **Approver**: The workflow used by an approver to review and approve or reject a user’s request for permissions. +- **Approver**: The workflow used by an approver to review and approve or reject a user's request for permissions. > [!NOTE] -> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. ## Create a request for permissions -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **My Requests** subtab. +1. 
On the Permissions Management home page, select the **Remediation** tab, and then select the **My Requests** subtab. The **My Requests** subtab displays the following options: - - **Pending**: A list of requests you’ve made but haven't yet been reviewed. + - **Pending**: A list of requests you've made but haven't yet been reviewed. - **Approved**: A list of requests that have been reviewed and approved by the approver. These requests have either already been activated or are in the process of being activated. - - **Processed**: A summary of the requests you’ve created that have been approved (**Done**), **Rejected**, and requests that have been **Canceled**. + - **Processed**: A summary of the requests you've created that have been approved (**Done**), **Rejected**, and requests that have been **Canceled**. 1. To create a request for permissions, select **New Request**. 1. In the **Roles/Tasks** page: 1. From the **Authorization System Type** dropdown, select the authorization system type you want to access: **AWS**, **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. - 1. From the **Identity** dropdown, select the identity on whose behalf you’re requesting access. + 1. From the **Identity** dropdown, select the identity on whose behalf you're requesting access. - - If the identity you select is a Security Assertions Markup Language (SAML) user, and since a SAML user accesses the system through assumption of a role, select the user’s role in **Role**. + - If the identity you select is a Security Assertions Markup Language (SAML) user, and since a SAML user accesses the system through assumption of a role, select the user's role in **Role**. - If the identity you select is a local user, to select the policies you want: 1. Select **Request Policy(s)**. 1. In **Available Policies**, select the policies you want. 1. To select a specific policy, select the plus sign, and then find and select the policy you want. 
- The policies you’ve selected appear in the **Selected policies** box. + The policies you've selected appear in the **Selected policies** box. - If the identity you select is a local user, to select the tasks you want: 1. Select **Request Task(s)**. 1. In **Available Tasks**, select the tasks you want. 1. To select a specific task, select the plus sign, and then select the task you want. - The tasks you’ve selected appear in the **Selected Tasks** box. + The tasks you've selected appear in the **Selected Tasks** box. If the user already has existing policies, they're displayed in **Existing Policies**. 1. Select **Next**. @@ -70,7 +70,7 @@ The **Remediation** dashboard has two privilege-on-demand (POD) workflows you ca - **No Resources** 1. In **Request Conditions**: 1. Select **JSON** to add a JSON block of code. - 1. Select **Done** to accept the code you’ve entered, or **Clear** to delete what you’ve entered and start again. + 1. Select **Done** to accept the code you've entered, or **Clear** to delete what you've entered and start again. 1. In **Effect**, select **Allow** or **Deny.** 1. Select **Next**. @@ -79,7 +79,7 @@ The **Remediation** dashboard has two privilege-on-demand (POD) workflows you ca 1. Optional: In **Note**, enter a note for the approver. 1. In **Schedule**, select when (how quickly) you want your request to be processed: - **ASAP** - - **Once** + - **Once** - In **Create Schedule**, select the **Frequency**, **Date**, **Time**, and **For** the required duration, then select **Schedule**. - **Daily** - **Weekly** @@ -92,7 +92,7 @@ The **Remediation** dashboard has two privilege-on-demand (POD) workflows you ca ## Approve or reject a request for permissions -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **My requests** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **My requests** subtab. 1. 
To view a list of requests that haven't yet been reviewed, select **Pending Requests**. 1. In the **Request Summary** list, select the ellipses **(…)** menu on the right of a request, and then select: @@ -109,12 +109,12 @@ The **Remediation** dashboard has two privilege-on-demand (POD) workflows you ca ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for Amazon Web Services (AWS) identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Add and remove roles and tasks for Azure and GCP identities](cloudknox-howto-attach-detach-permissions.md). 
-- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for Amazon Web Services (AWS) identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to add and remove roles and tasks for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Add and remove roles and tasks for Azure and GCP identities](how-to-attach-detach-permissions.md). 
+- For information on how to revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-custom-queries.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-custom-queries.md similarity index 74% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-custom-queries.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-custom-queries.md index 181f0988bfc9..c7b44d4bd6fe 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-custom-queries.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-custom-queries.md @@ -1,6 +1,6 @@ --- -title: Create a custom query in CloudKnox Permissions Management -description: How to create a custom query in the Audit dashboard in CloudKnox Permissions Management. +title: Create a custom query in Permissions Management +description: How to create a custom query in the Audit dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -12,19 +12,19 @@ ms.date: 02/23/2022 ms.author: kenwith --- -# Create a custom query +# Create a custom query > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
-This article describes how you can use the **Audit** dashboard in CloudKnox Permissions Management (CloudKnox) to create custom queries that you can modify, save, and run as often as you want. +This article describes how you can use the **Audit** dashboard in Permissions Management to create custom queries that you can modify, save, and run as often as you want. ## Open the Audit dashboard -- In the CloudKnox home page, select the **Audit** tab. +- In the Permissions Management home page, select the **Audit** tab. - CloudKnox displays the query options available to you. + Permissions Management displays the query options available to you. ## Create a custom query @@ -35,23 +35,23 @@ This article describes how you can use the **Audit** dashboard in CloudKnox Perm For example, to query by a date, select **Date** in the first box. In the second and third boxes, select the down arrow, and then select one of the date-related options. 1. To add parameters, select **Add**, select the down arrow in the first box to display a dropdown of available selections. Then select the parameter you want. -1. To add more parameters to the same query, select **Add** (the plus sign), and from the first box, select **And** or **Or**. +1. To add more parameters to the same query, select **Add** (the plus sign), and from the first box, select **And** or **Or**. Repeat this step for the second and third box to complete entering the parameters. 1. To change your query as you're creating it, select **Edit** (the pencil icon), and then change the query parameters. 1. To change the parameter options, select the down arrow in each box to display a dropdown of available selections. Then select the option you want. 1. To discard your selections, select **Reset Query** for the parameter you want to change, and then make your selections again. -1. When you’re ready to run your query, select **Search**. +1. When you're ready to run your query, select **Search**. 1. 
To save the query, select **Save**. - CloudKnox saves the query and adds it to the **Saved Queries** list. + Permissions Management saves the query and adds it to the **Saved Queries** list. ## Save the query under a new name 1. In the **Audit** dashboard, select the ellipses menu **(…)** on the far right and select **Save As**. 2. Enter a new name for the query, and then select **Save**. - CloudKnox saves the query under the new name. Both the new query and the original query display in the **Saved Queries** list. + Permissions Management saves the query under the new name. Both the new query and the original query display in the **Saved Queries** list. ## View a saved query @@ -63,7 +63,7 @@ This article describes how you can use the **Audit** dashboard in CloudKnox Perm 4. To open the query with the authorization systems you have currently selected (which may be different from the ones you originally saved), select **Load with the currently selected authorization systems**. 5. Select **Load Queries**. - CloudKnox displays details of the query in the **Activity** table. Select a query to see its details: + Permissions Management displays details of the query in the **Activity** table. Select a query to see its details: - The **Identity Details**. - The **Domain** name. @@ -86,22 +86,22 @@ This article describes how you can use the **Audit** dashboard in CloudKnox Perm 1. In the **Audit** dashboard, select the query you want to run. - CloudKnox displays the results of the query in the **Activity** table. + Permissions Management displays the results of the query in the **Activity** table. ## Delete a query 1. In the **Audit** dashboard, load the query you want to delete. 2. Select **Delete**. - CloudKnox deletes the query. Deleted queries don't display in the **Saved Queries** list. + Permissions Management deletes the query. Deleted queries don't display in the **Saved Queries** list. ## Rename a query 1. 
In the **Audit** dashboard, load the query you want to rename. -2. Select the ellipses menu **(…)** on the far right, and select **Rename**. +2. Select the ellipses menu **(…)** on the far right, and select **Rename**. 3. Enter a new name for the query, and then select **Save**. - CloudKnox saves the query under the new name. Both the new query and the original query display in the **Saved Queries** list. + Permissions Management saves the query under the new name. Both the new query and the original query display in the **Saved Queries** list. ## Duplicate a query @@ -116,6 +116,6 @@ This article describes how you can use the **Audit** dashboard in CloudKnox Perm ## Next steps -- For information on how to view how users access information, see [Use queries to see how users access information](cloudknox-ui-audit-trail.md). -- For information on how to filter and view user activity, see [Filter and query user activity](cloudknox-product-audit-trail.md). -- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](cloudknox-howto-audit-trail-results.md). +- For information on how to view how users access information, see [Use queries to see how users access information](ui-audit-trail.md). +- For information on how to filter and view user activity, see [Filter and query user activity](product-audit-trail.md). +- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](how-to-audit-trail-results.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-group-based-permissions.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-group-based-permissions.md similarity index 77% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-group-based-permissions.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-group-based-permissions.md index 731a60ed97e4..51cc754dc890 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-group-based-permissions.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-group-based-permissions.md @@ -1,6 +1,6 @@ --- -title: Select group-based permissions settings in CloudKnox Permissions Management with the User management dashboard -description: How to select group-based permissions settings in CloudKnox Permissions Management with the User management dashboard. +title: Select group-based permissions settings in Permissions Management with the User management dashboard +description: How to select group-based permissions settings in Permissions Management with the User management dashboard. services: active-directory author: kenwith manager: rkarlin @@ -15,12 +15,12 @@ ms.author: kenwith # Select group-based permissions settings > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how you can create and manage group-based permissions in CloudKnox Permissions Management (CloudKnox) with the User management dashboard. 
+This article describes how you can create and manage group-based permissions in Permissions Management with the User management dashboard. -[!NOTE] The CloudKnox Administrator for all authorization systems will be able to create the new group based permissions. +> [!NOTE] The Permissions Management Administrator for all authorization systems will be able to create the new group-based permissions. ## Select administrative permissions settings for a group @@ -29,7 +29,7 @@ This article describes how you can create and manage group-based permissions in 1. In the **Set Group Permission** box, begin typing the name of an **Azure Active Directory Security Group** in your tenant. 1. Select the permission setting you want: -2. +2. - **Admin for all Authorization System Types** provides **View**, **Control**, and **Approve** permissions for all authorization system types. - **Admin for selected Authorization System Types** provides **View**, **Control**, and **Approve** permissions for selected authorization system types. - **Custom** allows you to set **View**, **Control**, and **Approve** permissions for the authorization system types that you select. @@ -51,7 +51,6 @@ This article describes how you can create and manage group-based permissions in ## Next steps -- For information about how to manage user information, see [Manage users and groups with the User management dashboard](cloudknox-ui-user-management.md). 
+- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](ui-tasks.md). +- For information about how to view personal and organization information, see [View personal and organization information](product-account-settings.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-role-policy.md similarity index 78% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-role-policy.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-role-policy.md index 91218399c57d..cd2a8f0ab8be 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-role-policy.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-role-policy.md @@ -1,6 +1,6 @@ --- -title: Create a role/policy in the Remediation dashboard in CloudKnox Permissions Management -description: How to create a role/policy in the Remediation dashboard in CloudKnox Permissions Management. +title: Create a role/policy in the Remediation dashboard in Permissions Management +description: How to create a role/policy in the Remediation dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,24 +15,24 @@ ms.author: kenwith # Create a role/policy in the Remediation dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
-This article describes how you can use the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) to create roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. +This article describes how you can use the **Remediation** dashboard in Permissions Management to create roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. > [!NOTE] -> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. > [!NOTE] -> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. +> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. ## Create a policy for AWS -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** tab. 1. Use the dropdown lists to select the **Authorization System Type** and **Authorization System**. 1. Select **Create Policy**. 1. 
On the **Details** page, the **Authorization System Type** and **Authorization System** are pre-populated from your previous settings. - - To change the settings, make a selection from the dropdown. + - To change the settings, make a selection from the dropdown. 1. Under **How Would You Like To Create The Policy**, select the required option: - **Activity of User(s)**: Allows you to create a policy based on user activity. @@ -41,7 +41,7 @@ This article describes how you can use the **Remediation** dashboard in CloudKno - **Activity of Role**: Allows you to create a policy based on the aggregated activity of all the users that assumed the role. - **Activity of Tag(s)**: Allows you to create a policy based on the aggregated activity of all the tags. - **Activity of Lambda Function**: Allows you to create a new policy based on the Lambda function. - - **From Existing Policy**: Allows you to create a new policy based on an existing policy. + - **From Existing Policy**: Allows you to create a new policy based on an existing policy. - **New Policy**: Allows you to create a new policy from scratch. 1. In **Tasks performed in last**, select the duration: **90 days**, **60 days**, **30 days**, **7 days**, or **1 day**. 1. Depending on your preference, select or deselect **Include Access Advisor data.** @@ -68,30 +68,30 @@ This article describes how you can use the **Remediation** dashboard in CloudKno A message confirms that your policy has been submitted for creation -1. The [**CloudKnox Tasks**](cloudknox-ui-tasks.md) pane appears on the right. - - The **Active** tab displays a list of the policies CloudKnox is currently processing. - - The **Completed** tab displays a list of the policies CloudKnox has completed. +1. The [**Permissions Management Tasks**](ui-tasks.md) pane appears on the right. + - The **Active** tab displays a list of the policies Permissions Management is currently processing. 
+ - The **Completed** tab displays a list of the policies Permissions Management has completed. 1. Refresh the **Role/Policies** tab to see the policy you created. ## Create a role for Azure -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** tab. 1. Use the dropdown lists to select the **Authorization System Type** and **Authorization System**. 1. Select **Create Role**. 1. On the **Details** page, the **Authorization System Type** and **Authorization System** are pre-populated from your previous settings. - - To change the settings, select the box and make a selection from the dropdown. + - To change the settings, select the box and make a selection from the dropdown. 1. Under **How Would You Like To Create The Role?**, select the required option: - **Activity of User(s)**: Allows you to create a role based on user activity. - **Activity of Group(s)**: Allows you to create a role based on the aggregated activity of all the users belonging to the group(s). - **Activity of App(s)**: Allows you to create a role based on the aggregated activity of all apps. - - **From Existing Role**: Allows you to create a new role based on an existing role. + - **From Existing Role**: Allows you to create a new role based on an existing role. - **New Role**: Allows you to create a new role from scratch. 1. In **Tasks performed in last**, select the duration: **90 days**, **60 days**, **30 days**, **7 days**, or **1 day**. -1. Depending on your preference: +1. Depending on your preference: - Select or deselect **Ignore Non-Microsoft Read Actions**. - Select or deselect **Include Read-Only Tasks**. 1. In **Settings**, from the **Available** column, select the plus sign **(+)** to move the identity into the **Selected** column, and then select **Next**. 
@@ -113,24 +113,24 @@ This article describes how you can use the **Remediation** dashboard in CloudKno A message confirms that your role has been submitted for creation -1. The [**CloudKnox Tasks**](cloudknox-ui-tasks.md) pane appears on the right. - - The **Active** tab displays a list of the policies CloudKnox is currently processing. - - The **Completed** tab displays a list of the policies CloudKnox has completed. +1. The [**Permissions Management Tasks**](ui-tasks.md) pane appears on the right. + - The **Active** tab displays a list of the policies Permissions Management is currently processing. + - The **Completed** tab displays a list of the policies Permissions Management has completed. 1. Refresh the **Role/Policies** tab to see the role you created. ## Create a role for GCP -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** tab. 1. Use the dropdown lists to select the **Authorization System Type** and **Authorization System**. 1. Select **Create Role**. 1. On the **Details** page, the **Authorization System Type** and **Authorization System** are pre-populated from your previous settings. - - To change the settings, select the box and make a selection from the dropdown. + - To change the settings, select the box and make a selection from the dropdown. 1. Under **How Would You Like To Create The Role?**, select the required option: - **Activity of User(s)**: Allows you to create a role based on user activity. - **Activity of Group(s)**: Allows you to create a role based on the aggregated activity of all the users belonging to the group(s). - **Activity of Service Account(s)**: Allows you to create a role based on the aggregated activity of all service accounts. - - **From Existing Role**: Allows you to create a new role based on an existing role. 
+ - **From Existing Role**: Allows you to create a new role based on an existing role. - **New Role**: Allows you to create a new role from scratch. 1. In **Tasks performed in last**, select the duration: **90 days**, **60 days**, **30 days**, **7 days**, or **1 day**. @@ -151,21 +151,21 @@ This article describes how you can use the **Remediation** dashboard in CloudKno 1. Select **Submit**. A message confirms that your role has been submitted for creation -1. The [**CloudKnox Tasks**](cloudknox-ui-tasks.md) pane appears on the right. +1. The [**Permissions Management Tasks**](ui-tasks.md) pane appears on the right. - - The **Active** tab displays a list of the policies CloudKnox is currently processing. - - The **Completed** tab displays a list of the policies CloudKnox has completed. + - The **Active** tab displays a list of the policies Permissions Management is currently processing. + - The **Completed** tab displays a list of the policies Permissions Management has completed. 1. Refresh the **Role/Policies** tab to see the role you created. ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to modify a role/policy, see [Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). 
-- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). -- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). 
+- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-rule.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-rule.md similarity index 78% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-rule.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-rule.md index 38c00f0e645a..d2da0287aecb 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-create-rule.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-create-rule.md @@ -1,6 +1,6 @@ --- -title: Create a rule in the Autopilot dashboard in CloudKnox Permissions Management -description: How to create a rule in the Autopilot dashboard in CloudKnox Permissions Management. +title: Create a rule in the Autopilot dashboard in Permissions Management +description: How to create a rule in the Autopilot dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,19 +15,19 @@ ms.author: kenwith # Create a rule in the Autopilot dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to create a rule in the CloudKnox Permissions Management (CloudKnox) **Autopilot** dashboard. + +This article describes how to create a rule in the Permissions Management **Autopilot** dashboard. 
> [!NOTE] -> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don’t have these permissions, contact your system administrator. +> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don't have these permissions, contact your system administrator. -## Create a rule +## Create a rule -1. In the CloudKnox home page, select the **Autopilot** tab. +1. In the Permissions Management home page, select the **Autopilot** tab. 1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. 1. In the **Autopilot** dashboard, select **New Rule**. 1. In the **Rule Name** box, enter a name for your rule. 1. Select **AWS**, **Azure**, **GCP**, and then select **Next**. @@ -54,7 +54,7 @@ This article describes how to create a rule in the CloudKnox Permissions Managem - **Rule Name**: The name of the rule. - **State**: The status of the rule: idle (not being use) or active (being used). - - **Rule Type**: The type of rule being applied. + - **Rule Type**: The type of rule being applied. - **Mode**: The status of the mode: on-demand or not. - **Last Generated**: The date and time the rule was last generated. - **Created By**: The email address of the user who created the rule. @@ -66,6 +66,6 @@ This article describes how to create a rule in the CloudKnox Permissions Managem ## Next steps -- For more information about viewing rules, see [View roles in the Autopilot dashboard](cloudknox-ui-autopilot.md). 
-- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](cloudknox-howto-recommendations-rule.md). -- For information about notification settings for rules, see [View notification settings for a rule](cloudknox-howto-notifications-rule.md). +- For more information about viewing rules, see [View roles in the Autopilot dashboard](ui-autopilot.md). +- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](how-to-recommendations-rule.md). +- For information about notification settings for rules, see [View notification settings for a rule](how-to-notifications-rule.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-delete-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-delete-role-policy.md similarity index 60% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-delete-role-policy.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-delete-role-policy.md index 5339d078bcdd..6cb3b89f7592 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-delete-role-policy.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-delete-role-policy.md @@ -1,5 +1,5 @@ --- -title: Delete a role/policy in the Remediation dashboard in CloudKnox Permissions Management +title: Delete a role/policy in the Remediation dashboard in Permissions Management description: How to delete a role/policy in the Just Enough Permissions (JEP) Controller. 
services: active-directory author: kenwith @@ -15,23 +15,23 @@ ms.author: kenwith # Delete a role/policy in the Remediation dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how you can use the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) to delete roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. +This article describes how you can use the **Remediation** dashboard in Permissions Management to delete roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. > [!NOTE] -> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. > [!NOTE] -> Microsoft Azure uses the term *role* for what other Cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. +> Microsoft Azure uses the term *role* for what other Cloud providers call *policy*. 
Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. ## Delete a role/policy -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** subtab. 1. Select the role/policy you want to delete, and from the **Actions** column, select **Delete**. - You can only delete a role/policy if it isn't assigned to an identity. + You can only delete a role/policy if it isn't assigned to an identity. You can't delete system roles/policies. @@ -40,12 +40,12 @@ This article describes how you can use the **Remediation** dashboard in CloudKno ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). 
-- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). -- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). 
+- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-modify-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-modify-role-policy.md similarity index 61% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-modify-role-policy.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-modify-role-policy.md index b04e1e695c7f..8c51e75c7c2f 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-modify-role-policy.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-modify-role-policy.md @@ -1,6 +1,6 @@ --- -title: Modify a role/policy in the Remediation dashboard in CloudKnox Permissions Management -description: How to modify a role/policy in the Remediation dashboard in CloudKnox Permissions Management. +title: Modify a role/policy in the Remediation dashboard in Permissions Management +description: How to modify a role/policy in the Remediation dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,20 +15,20 @@ ms.author: kenwith # Modify a role/policy in the Remediation dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
-This article describes how you can use the **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) to modify roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. +This article describes how you can use the **Remediation** dashboard in Permissions Management to modify roles/policies for the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. > [!NOTE] -> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. > [!NOTE] -> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. +> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. ## Modify a role/policy -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** tab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** tab. 1. Select the role/policy you want to modify, and from the **Actions** column, select **Modify**. You can't modify **System** policies and roles. 
@@ -39,12 +39,12 @@ This article describes how you can use the **Remediation** dashboard in CloudKno ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). -- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). 
+- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). +- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-notifications-rule.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-notifications-rule.md similarity index 63% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-notifications-rule.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-notifications-rule.md index 54d9c277b0b4..08e466861d3f 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-notifications-rule.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-notifications-rule.md @@ -1,6 +1,6 @@ --- -title: View notification settings for a rule in the Autopilot dashboard in CloudKnox Permissions Management -description: How to view notification settings for a rule in the Autopilot dashboard in CloudKnox Permissions Management. 
+title: View notification settings for a rule in the Autopilot dashboard in Permissions Management +description: How to view notification settings for a rule in the Autopilot dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,30 +15,30 @@ ms.author: kenwith # View notification settings for a rule in the Autopilot dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to view notification settings for a rule in the CloudKnox Permissions Management (CloudKnox) **Autopilot** dashboard. + +This article describes how to view notification settings for a rule in the Permissions Management **Autopilot** dashboard. > [!NOTE] -> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don’t have these permissions, contact your system administrator. +> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don't have these permissions, contact your system administrator. -## View notification settings for a rule +## View notification settings for a rule -1. In the CloudKnox home page, select the **Autopilot** tab. +1. In the Permissions Management home page, select the **Autopilot** tab. 1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. 
From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. 1. In the **Autopilot** dashboard, select a rule. 1. In the far right of the row, select the ellipses **(...)** -1. To view notification settings for a rule, select **Notification Settings**. +1. To view notification settings for a rule, select **Notification Settings**. - CloudKnox displays a list of subscribed users. These users are signed up to receive notifications for the selected rule. + Permissions Management displays a list of subscribed users. These users are signed up to receive notifications for the selected rule. 1. To close the **Notification Settings** box, select **Close**. ## Next steps -- For more information about viewing rules, see [View roles in the Autopilot dashboard](cloudknox-ui-autopilot.md). -- For information about creating rules, see [Create a rule](cloudknox-howto-create-rule.md). -- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](cloudknox-howto-recommendations-rule.md). +- For more information about viewing rules, see [View roles in the Autopilot dashboard](ui-autopilot.md). +- For information about creating rules, see [Create a rule](how-to-create-rule.md). +- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](how-to-recommendations-rule.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-recommendations-rule.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-recommendations-rule.md similarity index 67% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-recommendations-rule.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-recommendations-rule.md index f73e725c3909..2d83f8b4a469 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-recommendations-rule.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-recommendations-rule.md @@ -1,6 +1,6 @@ --- -title: Generate, view, and apply rule recommendations in the Autopilot dashboard in CloudKnox Permissions Management -description: How to generate, view, and apply rule recommendations in the Autopilot dashboard in CloudKnox Permissions Management. +title: Generate, view, and apply rule recommendations in the Autopilot dashboard in Permissions Management +description: How to generate, view, and apply rule recommendations in the Autopilot dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,67 +15,67 @@ ms.author: kenwith # Generate, view, and apply rule recommendations in the Autopilot dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. - -This article describes how to generate and view rule recommendations in the CloudKnox Permissions Management (CloudKnox) **Autopilot** dashboard. 
+ +This article describes how to generate and view rule recommendations in the Permissions Management **Autopilot** dashboard. > [!NOTE] -> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don’t have these permissions, contact your system administrator. +> Only users with **Administrator** permissions can view and make changes on the Autopilot tab. If you don't have these permissions, contact your system administrator. -## Generate rule recommendations +## Generate rule recommendations -1. In the CloudKnox home page, select the **Autopilot** tab. +1. In the Permissions Management home page, select the **Autopilot** tab. 1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. 1. In the **Autopilot** dashboard, select a rule. 1. In the far right of the row, select the ellipses **(...)**. -1. To generate recommendations for each user and the authorization system, select **Generate Recommendations**. +1. To generate recommendations for each user and the authorization system, select **Generate Recommendations**. Only the user who created the selected rule can generate a recommendation. 1. View your recommendations in the **Recommendations** subtab. 1. Select **Close** to close the **Recommendations** subtab. -## View rule recommendations +## View rule recommendations -1. In the CloudKnox home page, select the **Autopilot** tab. +1. In the Permissions Management home page, select the **Autopilot** tab. 1. 
In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. 1. In the **Autopilot** dashboard, select a rule. 1. In the far right of the row, select the ellipses **(...)** -1. To view recommendations for each user and the authorization system, select **View Recommendations**. +1. To view recommendations for each user and the authorization system, select **View Recommendations**. - CloudKnox displays the recommendations for each user and authorization system in the **Recommendations** subtab. + Permissions Management displays the recommendations for each user and authorization system in the **Recommendations** subtab. 1. Select **Close** to close the **Recommendations** subtab. -## Apply rule recommendations +## Apply rule recommendations -1. In the CloudKnox home page, select the **Autopilot** tab. +1. In the Permissions Management home page, select the **Autopilot** tab. 1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. 1. In the **Autopilot** dashboard, select a rule. 1. In the far right of the row, select the ellipses **(...)** -1. 
To view recommendations for each user and the authorization system, select **View Recommendations**. +1. To view recommendations for each user and the authorization system, select **View Recommendations**. - CloudKnox displays the recommendations for each user and authorization system in the **Recommendations** subtab. + Permissions Management displays the recommendations for each user and authorization system in the **Recommendations** subtab. 1. To apply a recommendation, select the **Apply Recommendations** subtab, and then select a recommendation. 1. Select **Close** to close the **Recommendations** subtab. -## Unapply rule recommendations +## Unapply rule recommendations -1. In the CloudKnox home page, select the **Autopilot** tab. +1. In the Permissions Management home page, select the **Autopilot** tab. 1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). -1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. +1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want, and then select **Apply**. 1. In the **Autopilot** dashboard, select a rule. 1. In the far right of the row, select the ellipses **(...)** -1. To view recommendations for each user and the authorization system, select **View Recommendations**. +1. To view recommendations for each user and the authorization system, select **View Recommendations**. - CloudKnox displays the recommendations for each user and authorization system in the **Recommendations** subtab. + Permissions Management displays the recommendations for each user and authorization system in the **Recommendations** subtab. 1. 
To remove a recommendation, select the **Unapply Recommendations** subtab, and then select a recommendation. 1. Select **Close** to close the **Recommendations** subtab. @@ -83,6 +83,6 @@ This article describes how to generate and view rule recommendations in the Clou ## Next steps -- For more information about viewing rules, see [View roles in the Autopilot dashboard](cloudknox-ui-autopilot.md). -- For information about creating rules, see [Create a rule](cloudknox-howto-create-rule.md). -- For information about notification settings for rules, see [View notification settings for a rule](cloudknox-howto-notifications-rule.md). +- For more information about viewing rules, see [View roles in the Autopilot dashboard](ui-autopilot.md). +- For information about creating rules, see [Create a rule](how-to-create-rule.md). +- For information about notification settings for rules, see [View notification settings for a rule](how-to-notifications-rule.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-revoke-task-readonly-status.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-revoke-task-readonly-status.md similarity index 77% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-revoke-task-readonly-status.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-revoke-task-readonly-status.md index d2c5e51db065..85a0a4465fe8 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-revoke-task-readonly-status.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-revoke-task-readonly-status.md @@ -1,6 +1,6 @@ --- -title: Revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in CloudKnox Permissions Management -description: How to 
revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in CloudKnox Permissions Management. +title: Revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in Permissions Management +description: How to revoke access to high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities in the Remediation dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -16,24 +16,24 @@ ms.author: kenwith > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. This article describes how you can revoke high-risk and unused tasks or assign read-only status for Microsoft Azure and Google Cloud Platform (GCP) identities using the **Remediation** dashboard. > [!NOTE] -> To view the **Remediation** tab, your must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. ## View an identity's permissions -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. 
On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search for** dropdown, select **Group**, **User**, or **APP/Service Account**. 1. To search for more parameters, you can make a selection from the **User States**, **Permission Creep Index**, and **Task Usage** dropdowns. 1. Select **Apply**. - CloudKnox displays a list of groups, users, and service accounts that match your criteria. + Permissions Management displays a list of groups, users, and service accounts that match your criteria. 1. In **Enter a username**, enter or select a user. 1. In **Enter a Group Name**, enter or select a group, then select **Apply**. 1. Make a selection from the results list. @@ -43,69 +43,69 @@ This article describes how you can revoke high-risk and unused tasks or assign r ## Revoke an identity's access to unused tasks -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search for** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. 1. Make a selection from the results list. 1. To revoke an identity's access to tasks they aren't using, select **Revoke Unused Tasks**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. 
When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - **Execute** to change the permission. - **Close** to cancel the action. ## Revoke an identity's access to high-risk tasks -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. 1. Make a selection from the results list. 1. To revoke an identity's access to high-risk tasks, select **Revoke High-Risk Tasks**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - **Execute** to change the permission. - **Close** to cancel the action. ## Revoke an identity's ability to delete tasks -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search For** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. 1. Make a selection from the results list. 1. 
To revoke an identity's ability to delete tasks, select **Revoke Delete Tasks**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - **Execute** to change the permission. - **Close** to cancel the action. ## Assign read-only status to an identity -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Permissions** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Permissions** subtab. 1. From the **Authorization System Type** dropdown, select **Azure** or **GCP**. 1. From the **Authorization System** dropdown, select the accounts you want to access. 1. From the **Search for** dropdown, select **Group**, **User**, or **APP/Service Account**, and then select **Apply**. 1. Make a selection from the results list. 1. To assign read-only status to an identity, select **Assign Read-Only Status**. -1. When the following message displays: **Are you sure you want to change permission?**, select: +1. When the following message displays: **Are you sure you want to change permission?**, select: - **Generate Script** to generate a script where you can manually add/remove the permissions you selected. - **Execute** to change the permission. - **Close** to cancel the action. - + ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). 
-- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to add and remove roles and tasks for Azure and GCP identities, see [Add and remove roles and tasks for Azure and GCP identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). +- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md)
+- For information on how to add and remove roles and tasks for Azure and GCP identities, see [Add and remove roles and tasks for Azure and GCP identities](how-to-attach-detach-permissions.md). +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-view-role-policy.md b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-view-role-policy.md similarity index 63% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-view-role-policy.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/how-to-view-role-policy.md index a6574d3ae8d2..9c1e939b897f 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-howto-view-role-policy.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/how-to-view-role-policy.md @@ -1,6 +1,6 @@ --- -title: View information about roles/ policies in the Remediation dashboard in CloudKnox Permissions Management -description: How to view and filter information about roles/ policies in the Remediation dashboard in CloudKnox Permissions Management. +title: View information about roles/ policies in the Remediation dashboard in Permissions Management +description: How to view and filter information about roles/ policies in the Remediation dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,41 +15,41 @@ ms.author: kenwith # View information about roles/ policies in the Remediation dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. 
> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) enables system administrators to view, adjust, and remediate excessive permissions based on a user's activity data. You can use the **Roles/Policies** subtab in the dashboard to view information about roles and policies in the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. +The **Remediation** dashboard in Permissions Management enables system administrators to view, adjust, and remediate excessive permissions based on a user's activity data. You can use the **Roles/Policies** subtab in the dashboard to view information about roles and policies in the Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) authorization systems. > [!NOTE] -> To view the **Remediation dashboard** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation dashboard** tab, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this tab, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. > [!NOTE] -> Microsoft Azure uses the term *role* for what other Cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. +> Microsoft Azure uses the term *role* for what other Cloud providers call *policy*. 
Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. ## View information about roles/policies -1. On the CloudKnox home page, select the **Remediation** tab, and then select the **Role/Policies** subtab. +1. On the Permissions Management home page, select the **Remediation** tab, and then select the **Role/Policies** subtab. The **Role/Policies list** displays a list of existing roles/policies and the following information about each role/policy - **Role/Policy Name**: The name of the roles/policies available to you. - - **Role/Policy Type**: **Custom**, **System**, or **CloudKnox Only** + - **Role/Policy Type**: **Custom**, **System**, or **Permissions Management Only** - **Actions**: The type of action you can perform on the role/policy, **Clone**, **Modify**, or **Delete** -1. To display details about the role/policy and view its assigned tasks and identities, select the arrow to the left of the role/policy name. +1. To display details about the role/policy and view its assigned tasks and identities, select the arrow to the left of the role/policy name. The **Tasks** list appears, displaying: - A list of **Tasks**. - - **For AWS:** + - **For AWS:** - The **Users**, **Groups**, and **Roles** the task is **Directly Assigned To**. - - The **Group Members** and **Role Identities** the task is **Indirectly Accessible By**. + - The **Group Members** and **Role Identities** the task is **Indirectly Accessible By**. - - **For Azure:** + - **For Azure:** - The **Users**, **Groups**, **Enterprise Applications** and **Managed Identities** the task is **Directly Assigned To**. - The **Group Members** the task is **Indirectly Accessible By**. - - **For GCP:** + - **For GCP:** - The **Users**, **Groups**, and **Service Accounts** the task is **Directly Assigned To**. - The **Group Members** the task is **Indirectly Accessible By**. 
@@ -57,11 +57,11 @@ The **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) en ## Export information about roles/policies -- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. +- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. When the file is successfully exported, a message appears: **Exported Successfully.** - - Check your email for a message from the CloudKnox Customer Success Team. This email contains a link to: + - Check your email for a message from the Permissions Management Customer Success Team. This email contains a link to: - The **Role Policy Details** report in CSV format. - The **Reports** dashboard where you can configure how and when you can automatically receive reports. @@ -70,7 +70,7 @@ The **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) en ## Filter information about roles/policies -1. On the CloudKnox home page, select the **Remediation** dashboard, and then select the **Role/Policies** tab. +1. On the Permissions Management home page, select the **Remediation** dashboard, and then select the **Role/Policies** tab. 1. To filter the roles/policies, select from the following options: - **Authorization System Type**: Select **AWS**, **Azure**, or **GCP**. @@ -78,9 +78,9 @@ The **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) en - **Role/Policy Type**: Select from the following options: - **All**: All managed roles/policies. - - **Custom**: A customer-managed role/policy. - - **System**: A cloud service provider-managed role/policy. - - **CloudKnox Only**: A role/policy created by CloudKnox. + - **Custom**: A customer-managed role/policy. + - **System**: A cloud service provider-managed role/policy. + - **Permissions Management Only**: A role/policy created by Permissions Management. 
- **Role/Policy Status**: Select **All**, **Assigned**, or **Unassigned**. - **Role/Policy Usage**: Select **All** or **Unused**. @@ -91,12 +91,12 @@ The **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) en ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- For information on how to attach and detach permissions AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). -- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). 
+- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md). +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). +- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/index.yml b/articles/active-directory/cloud-infrastructure-entitlement-management/index.yml index ad09baeee4cf..89f86084a634 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/index.yml +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/index.yml @@ -1,11 +1,11 @@ ### YamlMime:Landing -title: CloudKnox Permissions Management -summary: CloudKnox Permissions Management is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities (users and workloads), actions, and resources across cloud infrastructures. It detects, right-sizes, and monitors unused and excessive permissions and enables Zero Trust security through least privilege access in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP).
+title: Permissions Management +summary: Permissions Management is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities (users and workloads), actions, and resources across cloud infrastructures. It detects, right-sizes, and monitors unused and excessive permissions and enables Zero Trust security through least privilege access in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). metadata: - title: CloudKnox Permissions Management - description: Learn how to use CloudKnox Permissions Management and Cloud Infrastructure Entitlement Management (CIEM) + title: Permissions Management + description: Learn how to use Permissions Management and Cloud Infrastructure Entitlement Management (CIEM) services: active-directory author: kenwith manager: rkarlin @@ -15,8 +15,8 @@ metadata: ms.topic: landing-page ms.date: 03/09/2022 ms.author: kenwith - - + + # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | whats-new @@ -24,104 +24,102 @@ landingContent: # Cards and links should be based on top customer tasks or top subjects # Start card title with a verb # Card - - title: What's CloudKnox Permissions Management? + - title: What's Permissions Management? 
linkLists: - linkListType: overview links: - text: Overview - url: cloudknox-overview.md + url: overview.md # Card - - title: Onboard CloudKnox Permissions Management + - title: Onboard Permissions Management linkLists: - linkListType: overview links: - - text: Enable CloudKnox - url: cloudknox-onboard-enable-tenant.md + - text: Enable Permissions Management + url: onboard-enable-tenant.md # Card - title: View risk metrics in your authorization system linkLists: - linkListType: overview links: - text: View key statistics and data about your authorization system - url: cloudknox-ui-dashboard.md + url: ui-dashboard.md # Card - title: Configure settings for data collection linkLists: - linkListType: overview links: - text: View and configure settings for data collection - url: cloudknox-product-data-sources.md + url: product-data-sources.md # Card # - title: Manage organizational and personal information # linkLists: # - linkListType: overview # links: # - text: Set personal information and preferences - # url: cloudknox-product-account-settings.md + # url: product-account-settings.md # Card - title: View information about identities linkLists: - linkListType: overview links: - text: View information about identities - url: cloudknox-usage-analytics-home.md + url: usage-analytics-home.md - text: View how users access information - url: cloudknox-ui-audit-trail.md + url: ui-audit-trail.md # Card - title: Manage roles/policies and permission requests linkLists: - linkListType: overview links: - text: View existing roles/policies and requests for permission - url: cloudknox-ui-remediation.md + url: ui-remediation.md # Card # - title: View how users access information # linkLists: # - linkListType: overview # links: # - text: View how users access information - # url: cloudknox-ui-audit-trail.md + # url: ui-audit-trail.md # Card - title: Set activity alerts and triggers linkLists: - linkListType: overview links: - text: View information about activity triggers - url: 
cloudknox-ui-triggers.md + url: ui-triggers.md # Card - title: Manage rules for authorization systems linkLists: - linkListType: overview links: - text: Create and view rules in the Autopilot dashboard - url: cloudknox-ui-autopilot.md + url: ui-autopilot.md # Card - title: Generate reports linkLists: - linkListType: overview links: - text: Generate and view a system report - url: cloudknox-report-view-system-report.md + url: report-view-system-report.md # Card - # - title: Learn with CloudKnox videos + # - title: Learn with Permissions Management videos # linkLists: # - linkListType: overview # links: - # - text: CloudKnox Permissions Management training videos - # url: cloudknox-training-videos.md + # - text: Permissions Management training videos + # url: training-videos.md # Card - title: FAQs linkLists: - linkListType: overview links: - text: FAQs - url: cloudknox-faqs.md + url: faqs.md # Card - title: Troubleshoot linkLists: - linkListType: overview links: - text: Troubleshoot - url: cloudknox-troubleshoot.md - - + url: troubleshoot.md diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-integration-api.md b/articles/active-directory/cloud-infrastructure-entitlement-management/integration-api.md similarity index 84% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-integration-api.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/integration-api.md index 2bcae5561976..75795ba23921 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-integration-api.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/integration-api.md @@ -1,6 +1,6 @@ --- -title: Set and view configuration settings in CloudKnox Permissions Management -description: How to view the CloudKnox Permissions Management API integration settings and create service accounts and roles. 
+title: Set and view configuration settings in Permissions Management +description: How to view the Permissions Management API integration settings and create service accounts and roles. services: active-directory author: kenwith manager: rkarlin @@ -15,10 +15,10 @@ ms.author: kenwith # Set and view configuration settings > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This topic describes how to view configuration settings, create and delete a service account, and create a role in CloudKnox Permissions Management (CloudKnox). +This topic describes how to view configuration settings, create and delete a service account, and create a role in Permissions Management. ## View configuration settings @@ -30,7 +30,7 @@ The **Integrations** dashboard displays the authorization systems available to y 1. Select an authorization system tile to view the following integration information: - 1. To find out more about the CloudKnox API, select **CloudKnox API**, and then select documentation. + 1. To find out more about the Permissions Management API, select **Permissions Management API**, and then select documentation. 1. To view information about service accounts, select **Integration**: @@ -43,7 +43,7 @@ The **Integrations** dashboard displays the authorization systems available to y 1. To view settings information, select **Settings**: - **Roles can create service account**: Lists the type of roles you can create. - - **Access Key Rotation Policy**: Lists notifications and actions you can set. + - **Access Key Rotation Policy**: Lists notifications and actions you can set. - **Access Key Usage Policy**: Lists notifications and actions you can set. 
## Create a service account @@ -67,7 +67,7 @@ The **Integrations** dashboard displays the authorization systems available to y 1. On the **Integrations** dashboard, select **User**, and then select **Integrations.** 1. On the right of the email address, select **Delete Service Account**. - + On the **Validate OTP To Delete [Service Name] Integration** box, a message displays asking you to check your email for a code sent to the email address on file. If you don't receive the code, select **Resend OTP**. @@ -79,9 +79,9 @@ The **Integrations** dashboard displays the authorization systems available to y ## Create a role 1. On the **Integrations** dashboard, select **User**, and then select **Settings**. -2. Under **Roles can create service account**, select the role you want: +2. Under **Roles can create service account**, select the role you want: - **Super Admin** - - **Viewer** + - **Viewer** - **Controller** 3. In the **Access Key Rotation Policy** column, select options for the following: @@ -100,6 +100,6 @@ The **Integrations** dashboard displays the authorization systems available to y - - - \ No newline at end of file + + + \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-multi-cloud-glossary.md b/articles/active-directory/cloud-infrastructure-entitlement-management/multi-cloud-glossary.md similarity index 67% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-multi-cloud-glossary.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/multi-cloud-glossary.md index c18ec28669bb..a23f7007f570 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-multi-cloud-glossary.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/multi-cloud-glossary.md @@ -1,6 +1,6 @@ --- -title: CloudKnox Permissions Management - The CloudKnox glossary -description: CloudKnox 
Permissions Management glossary +title: Permissions Management glossary +description: Permissions Management glossary services: active-directory author: kenwith manager: rkarlin @@ -12,13 +12,13 @@ ms.date: 02/23/2022 ms.author: kenwith --- -# The CloudKnox glossary +# The Permissions Management glossary > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This glossary provides a list of some of the commonly used cloud terms in CloudKnox Permissions Management (CloudKnox). These terms will help CloudKnox users navigate through cloud-specific terms and cloud-generic terms. +This glossary provides a list of some of the commonly used cloud terms in Permissions Management. These terms will help Permissions Management users navigate through cloud-specific terms and cloud-generic terms. ## Commonly-used acronyms and terms @@ -34,7 +34,7 @@ This glossary provides a list of some of the commonly used cloud terms in CloudK | CIEM | Cloud Infrastructure Entitlement Management. The next generation of solutions for enforcing least privilege in the cloud. It addresses cloud-native security challenges of managing identity access management in cloud environments. | | CIS | Cloud infrastructure security | | CWP | Cloud Workload Protection. A workload-centric security solution that targets the unique protection requirements of workloads in modern enterprise environments. | -| CNAPP | Cloud-Native Application Protection. The convergence of cloud security posture management (CSPM), cloud workload protection (CWP), cloud infrastructure entitlement management (CIEM), and cloud applications security broker (CASB). 
An integrated security approach that covers the entire lifecycle of cloud-native applications. | +| CNAPP | Cloud-Native Application Protection. The convergence of cloud security posture management (CSPM), cloud workload protection (CWP), cloud infrastructure entitlement management (CIEM), and cloud applications security broker (CASB). An integrated security approach that covers the entire lifecycle of cloud-native applications. | | CSPM | Cloud Security Posture Management. Addresses risks of compliance violations and misconfigurations in enterprise cloud environments. Also focuses on the resource level to identify deviations from best practice security settings for cloud governance and compliance. | | CWPP | Cloud Workload Protection Platform | | Data Collector | Virtual entity which stores the data collection configuration | @@ -43,40 +43,40 @@ This glossary provides a list of some of the commonly used cloud terms in CloudK | Entitlement | An abstract attribute that represents different forms of user permissions in a range of infrastructure systems and business applications.| | Entitlement management | Technology that grants, resolves, enforces, revokes, and administers fine-grained access entitlements (that is, authorizations, privileges, access rights, permissions and rules). Its purpose is to execute IT access policies to structured/unstructured data, devices, and services. It can be delivered by different technologies, and is often different across platforms, applications, network components, and devices. | | High-risk task | A task in which a user can cause data leakage, service disruption, or service degradation. | -| Hybrid cloud | Sometimes called a cloud hybrid. A computing environment that combines an on-premises data center (a private cloud) with a public cloud. It allows data and applications to be shared between them. | +| Hybrid cloud | Sometimes called a cloud hybrid. 
A computing environment that combines an on-premises data center (a private cloud) with a public cloud. It allows data and applications to be shared between them. | | hybrid cloud storage | A private or public cloud used to store an organization's data. | -| ICM | Incident Case Management | -| IDS | Intrusion Detection Service | +| ICM | Incident Case Management | +| IDS | Intrusion Detection Service | | Identity analytics | Includes basic monitoring and remediation, dormant and orphan account detection and removal, and privileged account discovery. | | Identity lifecycle management | Maintain digital identities, their relationships with the organization, and their attributes during the entire process from creation to eventual archiving, using one or more identity life cycle patterns. | | IGA | Identity governance and administration. Technology solutions that conduct identity management and access governance operations. IGA includes the tools, technologies, reports, and compliance activities required for identity lifecycle management. It includes every operation from account creation and termination to user provisioning, access certification, and enterprise password management. It looks at automated workflow and data from authoritative sources capabilities, self-service user provisioning, IT governance, and password management. | -| ITSM | Information Technology Security Management. Tools that enable IT operations organizations (infrastructure and operations managers), to better support the production environment. Facilitate the tasks and workflows associated with the management and delivery of quality IT services. | +| ITSM | Information Technology Security Management. Tools that enable IT operations organizations (infrastructure and operations managers), to better support the production environment. Facilitate the tasks and workflows associated with the management and delivery of quality IT services. 
| | JEP | Just Enough Permissions | -| JIT | Just in Time access can be seen as a way to enforce the principle of least privilege to ensure users and non-human identities are given the minimum level of privileges. It also ensures that privileged activities are conducted in accordance with an organization’s Identity Access Management (IAM), IT Service Management (ITSM), and Privileged Access Management (PAM) policies, with its entitlements and workflows. JIT access strategy enables organizations to maintain a full audit trail of privileged activities so they can easily identify who or what gained access to which systems, what they did at what time, and for how long. | +| JIT | Just in Time access can be seen as a way to enforce the principle of least privilege to ensure users and non-human identities are given the minimum level of privileges. It also ensures that privileged activities are conducted in accordance with an organization's Identity Access Management (IAM), IT Service Management (ITSM), and Privileged Access Management (PAM) policies, with its entitlements and workflows. JIT access strategy enables organizations to maintain a full audit trail of privileged activities so they can easily identify who or what gained access to which systems, what they did at what time, and for how long. | | Least privilege | Ensures that users only gain access to the specific tools they need to complete a task. | -| Multi-tenant | A single instance of the software and its supporting infrastructure serves multiple customers. Each customer shares the software application and also shares a single database. | -| OIDC | OpenID Connect. An authentication protocol that verifies user identity when a user is trying to access a protected HTTPs end point. OIDC is an evolutionary development of ideas implemented earlier in OAuth. | +| Multi-tenant | A single instance of the software and its supporting infrastructure serves multiple customers. 
Each customer shares the software application and also shares a single database. | +| OIDC | OpenID Connect. An authentication protocol that verifies user identity when a user is trying to access a protected HTTPs end point. OIDC is an evolutionary development of ideas implemented earlier in OAuth. | | PAM | Privileged access management. Tools that offer one or more of these features: discover, manage, and govern privileged accounts on multiple systems and applications; control access to privileged accounts, including shared and emergency access; randomize, manage, and vault credentials (password, keys, etc.) for administrative, service, and application accounts; single sign-on (SSO) for privileged access to prevent credentials from being revealed; control, filter, and orchestrate privileged commands, actions, and tasks; manage and broker credentials to applications, services, and devices to avoid exposure; and monitor, record, audit, and analyze privileged access, sessions, and actions. | | PASM | Privileged accounts are protected by vaulting their credentials. Access to those accounts is then brokered for human users, services, and applications. Privileged session management (PSM) functions establish sessions with possible credential injection and full session recording. Passwords and other credentials for privileged accounts are actively managed and changed at definable intervals or upon the occurrence of specific events. PASM solutions may also provide application-to-application password management (AAPM) and zero-install remote privileged access features for IT staff and third parties that don't require a VPN. | | PEDM | Specific privileges are granted on the managed system by host-based agents to logged-in users. PEDM tools provide host-based command control (filtering); application allow, deny, and isolate controls; and/or privilege elevation. The latter is in the form of allowing particular commands to be run with a higher level of privileges. 
PEDM tools execute on the actual operating system at the kernel or process level. Command control through protocol filtering is explicitly excluded from this definition because the point of control is less reliable. PEDM tools may also provide file integrity monitoring features. | -| Permission | Rights and privileges. Details given by users or network administrators that define access rights to files on a network. Access controls attached to a resource dictating which identities can access it and how. Privileges are attached to identities and are the ability to perform certain actions. An identity having the ability to perform an action on a resource. | -| POD | Permission on Demand. A type of JIT access that allows the temporary elevation of permissions, enabling identities to access resources on a by-request, timed basis. | -| Permissions creep index (PCI) | A number from 0 to 100 that represents the incurred risk of users with access to high-risk privileges. PCI is a function of users who have access to high-risk privileges but aren't actively using them. | +| Permission | Rights and privileges. Details given by users or network administrators that define access rights to files on a network. Access controls attached to a resource dictating which identities can access it and how. Privileges are attached to identities and are the ability to perform certain actions. An identity having the ability to perform an action on a resource. | +| POD | Permission on Demand. A type of JIT access that allows the temporary elevation of permissions, enabling identities to access resources on a by-request, timed basis. | +| Permissions creep index (PCI) | A number from 0 to 100 that represents the incurred risk of users with access to high-risk privileges. PCI is a function of users who have access to high-risk privileges but aren't actively using them. | | Policy and role management | Maintain rules that govern automatic assignment and removal of access rights. 
Provides visibility of access rights for selection in access requests, approval processes, dependencies, and incompatibilities between access rights, and more. Roles are a common vehicle for policy management. | -| Privilege | The authority to make changes to a network or computer. Both people and accounts can have privileges, and both can have different levels of privilege. | +| Privilege | The authority to make changes to a network or computer. Both people and accounts can have privileges, and both can have different levels of privilege. | | Privileged account | A login credential to a server, firewall, or other administrative account. Often referred to as admin accounts. Comprised of the actual username and password; these two things together make up the account. A privileged account is allowed to do more things than a normal account. | -| Public Cloud | Computing services offered by third-party providers over the public Internet, making them available to anyone who wants to use or purchase them. They may be free or sold on-demand, allowing customers to pay only per usage for the CPU cycles, storage, or bandwidth they consume. | +| Public Cloud | Computing services offered by third-party providers over the public Internet, making them available to anyone who wants to use or purchase them. They may be free or sold on-demand, allowing customers to pay only per usage for the CPU cycles, storage, or bandwidth they consume. | | Resource | Any entity that uses compute capabilities can be accessed by users and services to perform actions. | -| Role | An IAM identity that has specific permissions. Instead of being uniquely associated with one person, a role is intended to be assumable by anyone who needs it. A role doesn't have standard long-term credentials such as a password or access keys associated with. | -| SCIM | System for Cross–domain Identity Management | -| SIEM | Security Information and Event Management. 
Technology that supports threat detection, compliance and security incident management through the collection and analysis (both near real time and historical) of security events, as well as a wide variety of other event and contextual data sources. The core capabilities are a broad scope of log event collection and management, the ability to analyze log events and other data across disparate sources, and operational capabilities (such as incident management, dashboards, and reporting). | +| Role | An IAM identity that has specific permissions. Instead of being uniquely associated with one person, a role is intended to be assumable by anyone who needs it. A role doesn't have standard long-term credentials such as a password or access keys associated with. | +| SCIM | System for Cross–domain Identity Management | +| SIEM | Security Information and Event Management. Technology that supports threat detection, compliance and security incident management through the collection and analysis (both near real time and historical) of security events, as well as a wide variety of other event and contextual data sources. The core capabilities are a broad scope of log event collection and management, the ability to analyze log events and other data across disparate sources, and operational capabilities (such as incident management, dashboards, and reporting). | | SOAR | Security orchestration, automation and response (SOAR). Technologies that enable organizations to take inputs from various sources (mostly from security information and event management [SIEM] systems) and apply workflows aligned to processes and procedures. These workflows can be orchestrated via integrations with other technologies and automated to achieve the desired outcome and greater visibility. Other capabilities include case and incident management features; the ability to manage threat intelligence, dashboards and reporting; and analytics that can be applied across various functions. 
SOAR tools significantly enhance security operations activities like threat detection and response by providing machine-powered assistance to human analysts to improve the efficiency and consistency of people and processes. | | Super user / Super identity | A powerful account used by IT system administrators that can be used to make configurations to a system or application, add or remove users, or delete data. | -| Tenant | A dedicated instance of the services and organization data stored within a specific default location. | -| UUID | Universally unique identifier. A 128-bit label used for information in computer systems. The term globally unique identifier (GUID) is also used.| +| Tenant | A dedicated instance of the services and organization data stored within a specific default location. | +| UUID | Universally unique identifier. A 128-bit label used for information in computer systems. The term globally unique identifier (GUID) is also used.| | Zero trust security | The three foundational principles: explicit verification, breach assumption, and least privileged access.| | ZTNA | Zero trust network access. A product or service that creates an identity- and context-based, logical access boundary around an application or set of applications. The applications are hidden from discovery, and access is restricted via a trust broker to a set of named entities. The broker verifies the identity, context and policy adherence of the specified participants before allowing access and prohibits lateral movement elsewhere in the network. It removes application assets from public visibility and significantly reduces the surface area for attack.| ## Next steps -- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md). +- For an overview of Permissions Management, see [What's Permissions Management?](overview.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-add-account-after-onboarding.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-add-account-after-onboarding.md similarity index 69% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-add-account-after-onboarding.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/onboard-add-account-after-onboarding.md index bceb2295d459..c02c442060d6 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-add-account-after-onboarding.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-add-account-after-onboarding.md @@ -1,6 +1,6 @@ --- -title: Add an account/ subscription/ project to Microsoft CloudKnox Permissions Management after onboarding is complete -description: How to add an account/ subscription/ project to Microsoft CloudKnox Permissions Management after onboarding is complete. +title: Add an account/ subscription/ project to Permissions Management after onboarding is complete +description: How to add an account/ subscription/ project to Permissions Management after onboarding is complete. services: active-directory author: kenwith manager: rkarlin @@ -15,22 +15,22 @@ ms.author: kenwith # Add an account/ subscription/ project after onboarding is complete > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
-This article describes how to add an Amazon Web Services (AWS) account, Microsoft Azure subscription, or Google Cloud Platform (GCP) project in Microsoft CloudKnox Permissions Management (CloudKnox) after you've completed the onboarding process. +This article describes how to add an Amazon Web Services (AWS) account, Microsoft Azure subscription, or Google Cloud Platform (GCP) project in Microsoft Permissions Management after you've completed the onboarding process. ## Add an AWS account after onboarding is complete -1. In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. +1. In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. 1. On the **Data collectors** dashboard, select **AWS**. 1. Select the ellipses **(...)** at the end of the row, and then select **Edit Configuration**. - The **CloudKnox Onboarding - Summary** page displays. + The **Permissions Management Onboarding - Summary** page displays. 1. Go to **AWS Account IDs**, and then select **Edit** (the pencil icon). - The **CloudKnox Onboarding - AWS Member Account Details** page displays. + The **Permissions Management Onboarding - AWS Member Account Details** page displays. 1. Go to **Enter Your AWS Account IDs**, and then select **Add** (the plus **+** sign). 1. Copy your account ID from AWS and paste it into the **Enter Account ID** box. @@ -44,7 +44,7 @@ This article describes how to add an Amazon Web Services (AWS) account, Microsof 1. Create a new script for the new account and press the **Enter** key. 1. Paste the script you copied. 1. Locate the account line, delete the original account ID (the one that was previously added), and then run the script. -1. Return to CloudKnox, and the new account ID you added will be added to the list of account IDs displayed in the **CloudKnox Onboarding - Summary** page. +1. 
Return to Permissions Management, and the new account ID you added will be added to the list of account IDs displayed in the **Permissions Management Onboarding - Summary** page. 1. Select **Verify now & save**. When your changes are saved, the following message displays: **Successfully updated configuration.** @@ -52,11 +52,11 @@ This article describes how to add an Amazon Web Services (AWS) account, Microsof ## Add an Azure subscription after onboarding is complete -1. In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. +1. In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. 1. On the **Data collectors** dashboard, select **Azure**. 1. Select the ellipses **(...)** at the end of the row, and then select **Edit Configuration**. - The **CloudKnox Onboarding - Summary** page displays. + The **Permissions Management Onboarding - Summary** page displays. 1. Go to **Azure subscription IDs**, and then select **Edit** (the pencil icon). 1. Go to **Enter your Azure Subscription IDs**, and then select **Add subscription** (the plus **+** sign). @@ -71,18 +71,18 @@ This article describes how to add an Amazon Web Services (AWS) account, Microsof 1. Create a new script for the new subscription and press enter. 1. Paste the script you copied. 1. Locate the subscription line and delete the original subscription ID (the one that was previously added), and then run the script. -1. Return to CloudKnox, and the new subscription ID you added will be added to the list of subscription IDs displayed in the **CloudKnox Onboarding - Summary** page. +1. Return to Permissions Management, and the new subscription ID you added will be added to the list of subscription IDs displayed in the **Permissions Management Onboarding - Summary** page. 1. Select **Verify now & save**. 
When your changes are saved, the following message displays: **Successfully updated configuration.** ## Add a GCP project after onboarding is complete -1. In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. +1. In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data collectors** tab. 1. On the **Data collectors** dashboard, select **GCP**. 1. Select the ellipses **(...)** at the end of the row, and then select **Edit Configuration**. - The **CloudKnox Onboarding - Summary** page displays. + The **Permissions Management Onboarding - Summary** page displays. 1. Go to **GCP Project IDs**, and then select **Edit** (the pencil icon). 1. Go to **Enter your GCP Project IDs**, and then select **Add Project ID** (the plus **+** sign). @@ -97,7 +97,7 @@ This article describes how to add an Amazon Web Services (AWS) account, Microsof 1. Create a new script for the new project ID and press enter. 1. Paste the script you copied. 1. Locate the project ID line and delete the original project ID (the one that was previously added), and then run the script. -1. Return to CloudKnox, and the new project ID you added will be added to the list of project IDs displayed in the **CloudKnox Onboarding - Summary** page. +1. Return to Permissions Management, and the new project ID you added will be added to the list of project IDs displayed in the **Permissions Management Onboarding - Summary** page. 1. Select **Verify now & save**. When your changes are saved, the following message displays: **Successfully updated configuration.** @@ -106,7 +106,7 @@ This article describes how to add an Amazon Web Services (AWS) account, Microsof ## Next steps -- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an AWS account](cloudknox-onboard-aws.md). 
- - For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](cloudknox-onboard-azure.md). -- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a GCP project](cloudknox-onboard-gcp.md). -- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](cloudknox-onboard-enable-controller-after-onboarding.md). +- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an AWS account](onboard-aws.md). + - For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](onboard-azure.md). +- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a GCP project](onboard-gcp.md). +- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](onboard-enable-controller-after-onboarding.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-aws.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-aws.md similarity index 55% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-aws.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/onboard-aws.md index 968f5dfb047e..fc4d7b83549e 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-aws.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-aws.md @@ -1,6 +1,6 @@ --- -title: Onboard an Amazon Web Services (AWS) account on CloudKnox Permissions Management -description: How to onboard an Amazon Web Services (AWS) account on CloudKnox Permissions Management. 
+title: Onboard an Amazon Web Services (AWS) account on Permissions Management +description: How to onboard an Amazon Web Services (AWS) account on Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,150 +15,150 @@ ms.author: kenwith # Onboard an Amazon Web Services (AWS) account > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). -This article describes how to onboard an Amazon Web Services (AWS) account on CloudKnox Permissions Management (CloudKnox). +This article describes how to onboard an Amazon Web Services (AWS) account on Permissions Management. -> [!NOTE] -> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable CloudKnox on your Azure Active Directory tenant](cloudknox-onboard-enable-tenant.md). +> [!NOTE] +> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable Permissions Management on your Azure Active Directory tenant](onboard-enable-tenant.md). 
## View a training video on configuring and onboarding an AWS account -To view a video on how to configure and onboard AWS accounts in CloudKnox, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). +To view a video on how to configure and onboard AWS accounts in Permissions Management, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). ## Onboard an AWS account -1. If the **Data Collectors** dashboard isn't displayed when CloudKnox launches: +1. If the **Data Collectors** dashboard isn't displayed when Permissions Management launches: - - In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. + - In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. 1. On the **Data Collectors** dashboard, select **AWS**, and then select **Create Configuration**. -### 1. Create an Azure AD OIDC App. +### 1. Create an Azure AD OIDC App -1. On the **CloudKnox Onboarding - Azure AD OIDC App Creation** page, enter the **OIDC Azure app name**. +1. On the **Permissions Management Onboarding - Azure AD OIDC App Creation** page, enter the **OIDC Azure app name**. This app is used to set up an OpenID Connect (OIDC) connection to your AWS account. OIDC is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. The scripts generated on this page create the app of this specified name in your Azure AD tenant with the right configuration. - + 1. To create the app registration, copy the script and run it in your Azure command-line app. - > [!NOTE] + > [!NOTE] > 1. To confirm that the app was created, open **App registrations** in Azure and, on the **All applications** tab, locate your app. > 1. Select the app name to open the **Expose an API** page. 
The **Application ID URI** displayed in the **Overview** page is the *audience value* used while making an OIDC connection with your AWS account. -1. Return to CloudKnox, and in the **CloudKnox Onboarding - Azure AD OIDC App Creation**, select **Next**. +1. Return to Permissions Management, and in the **Permissions Management Onboarding - Azure AD OIDC App Creation**, select **Next**. -### 2. Set up an AWS OIDC account. +### 2. Set up an AWS OIDC account -1. In the **CloudKnox Onboarding - AWS OIDC Account Setup** page, enter the **AWS OIDC account ID** where the OIDC provider is created. You can change the role name to your requirements. +1. In the **Permissions Management Onboarding - AWS OIDC Account Setup** page, enter the **AWS OIDC account ID** where the OIDC provider is created. You can change the role name to your requirements. 1. Open another browser window and sign in to the AWS account where you want to create the OIDC provider. -1. Select **Launch Template**. This link takes you to the **AWS CloudFormation create stack** page. +1. Select **Launch Template**. This link takes you to the **AWS CloudFormation create stack** page. 1. Scroll to the bottom of the page, and in the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create Stack.** This AWS CloudFormation stack creates an OIDC Identity Provider (IdP) representing Azure AD STS and an AWS IAM role with a trust policy that allows external identities from Azure AD to assume it via the OIDC IdP. These entities are listed on the **Resources** page. -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS OIDC Account Setup** page, select **Next**. +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS OIDC Account Setup** page, select **Next**. + +### 3. Set up an AWS master account (Optional) -### 3. Set up an AWS master account. (Optional) +1. 
If your organization has Service Control Policies (SCPs) that govern some or all of the member accounts, set up the master account connection in the **Permissions Management Onboarding - AWS Master Account Details** page. -1. If your organization has Service Control Policies (SCPs) that govern some or all of the member accounts, set up the master account connection in the **CloudKnox Onboarding - AWS Master Account Details** page. + Setting up the master account connection allows Permissions Management to auto-detect and onboard any AWS member accounts that have the correct Permissions Management role. - Setting up the master account connection allows CloudKnox to auto-detect and onboard any AWS member accounts that have the correct CloudKnox role. + - In the **Permissions Management Onboarding - AWS Master Account Details** page, enter the **Master Account ID** and **Master Account Role**. - - In the **CloudKnox Onboarding - AWS Master Account Details** page, enter the **Master Account ID** and **Master Account Role**. - 1. Open another browser window and sign in to the AWS console for your master account. -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS Master Account Details** page, select **Launch Template**. +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS Master Account Details** page, select **Launch Template**. The **AWS CloudFormation create stack** page opens, displaying the template. 1. Review the information in the template, make changes, if necessary, then scroll to the bottom of the page. 1. In the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create stack**. - - This AWS CloudFormation stack creates a role in the master account with the necessary permissions (policies) to collect SCPs and list all the accounts in your organization. 
+ + This AWS CloudFormation stack creates a role in the master account with the necessary permissions (policies) to collect SCPs and list all the accounts in your organization. A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. - -1. Return to CloudKnox, and in **CloudKnox Onboarding - AWS Master Account Details**, select **Next**. -### 4. Set up an AWS Central logging account. (Optional but recommended) +1. Return to Permissions Management, and in **Permissions Management Onboarding - AWS Master Account Details**, select **Next**. + +### 4. Set up an AWS Central logging account (Optional but recommended) + +1. If your organization has a central logging account where logs from some or all of your AWS account are stored, in the **Permissions Management Onboarding - AWS Central Logging Account Details** page, set up the logging account connection. -1. If your organization has a central logging account where logs from some or all of your AWS account are stored, in the **CloudKnox Onboarding - AWS Central Logging Account Details** page, set up the logging account connection. + In the **Permissions Management Onboarding - AWS Central Logging Account Details** page, enter the **Logging Account ID** and **Logging Account Role**. - In the **CloudKnox Onboarding - AWS Central Logging Account Details** page, enter the **Logging Account ID** and **Logging Account Role**. - 1. In another browser window, sign in to the AWS console for the AWS account you use for central logging. -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS Central Logging Account Details** page, select **Launch Template**. +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS Central Logging Account Details** page, select **Launch Template**. The **AWS CloudFormation create stack** page opens, displaying the template. 1. 
Review the information in the template, make changes, if necessary, then scroll to the bottom of the page. 1. In the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**, and then select **Create stack**. - + This AWS CloudFormation stack creates a role in the logging account with the necessary permissions (policies) to read S3 buckets used for central logging. A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. - -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS Central Logging Account Details** page, select **Next**. -### 5. Set up an AWS member account. +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS Central Logging Account Details** page, select **Next**. + +### 5. Set up an AWS member account -1. In the **CloudKnox Onboarding - AWS Member Account Details** page, enter the **Member Account Role** and the **Member Account IDs**. +1. In the **Permissions Management Onboarding - AWS Member Account Details** page, enter the **Member Account Role** and the **Member Account IDs**. You can enter up to 10 account IDs. Click the plus icon next to the text box to add more account IDs. > [!NOTE] > Perform the next 6 steps for each account ID you add. -1. Open another browser window and sign in to the AWS console for the member account. +1. Open another browser window and sign in to the AWS console for the member account. -1. Return to the **CloudKnox Onboarding - AWS Member Account Details** page, select **Launch Template**. +1. Return to the **Permissions Management Onboarding - AWS Member Account Details** page, select **Launch Template**. The **AWS CloudFormation create stack** page opens, displaying the template. -1. In the **CloudTrailBucketName** page, enter a name. +1. 
In the **CloudTrailBucketName** page, enter a name. You can copy and paste the **CloudTrailBucketName** name from the **Trails** page in AWS. - > [!NOTE] - > A *cloud bucket* collects all the activity in a single account that CloudKnox monitors. Enter the name of a cloud bucket here to provide CloudKnox with the access required to collect activity data. + > [!NOTE] + > A *cloud bucket* collects all the activity in a single account that Permissions Management monitors. Enter the name of a cloud bucket here to provide Permissions Management with the access required to collect activity data. -1. From the **Enable Controller** dropdown, select: +1. From the **Enable Controller** dropdown, select: - - **True**, if you want the controller to provide CloudKnox with read and write access so that any remediation you want to do from the CloudKnox platform can be done automatically. - - **False**, if you want the controller to provide CloudKnox with read-only access. + - **True**, if you want the controller to provide Permissions Management with read and write access so that any remediation you want to do from the Permissions Management platform can be done automatically. + - **False**, if you want the controller to provide Permissions Management with read-only access. 1. Scroll to the bottom of the page, and in the **Capabilities** box, select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create stack**. - This AWS CloudFormation stack creates a collection role in the member account with necessary permissions (policies) for data collection. + This AWS CloudFormation stack creates a collection role in the member account with necessary permissions (policies) for data collection. + + A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. 
- A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. +1. Return to Permissions Management, and in the **Permissions Management Onboarding - AWS Member Account Details** page, select **Next**. -1. Return to CloudKnox, and in the **CloudKnox Onboarding - AWS Member Account Details** page, select **Next**. - This step completes the sequence of required connections from Azure AD STS to the OIDC connection account and the AWS member account. -### 6. Review and save. +### 6. Review and save -1. In **CloudKnox Onboarding – Summary**, review the information you’ve added, and then select **Verify Now & Save**. +1. In **Permissions Management Onboarding – Summary**, review the information you've added, and then select **Verify Now & Save**. The following message appears: **Successfully created configuration.** - On the **Data Collectors** dashboard, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** + On the **Data Collectors** dashboard, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** - You have now completed onboarding AWS, and CloudKnox has started collecting and processing your data. + You have now completed onboarding AWS, and Permissions Management has started collecting and processing your data. -### 7. View the data. +### 7. View the data -1. To view the data, select the **Authorization Systems** tab. +1. To view the data, select the **Authorization Systems** tab. The **Status** column in the table displays **Collecting Data.** @@ -167,7 +167,7 @@ To view a video on how to configure and onboard AWS accounts in CloudKnox, selec ## Next steps -- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](cloudknox-onboard-azure.md). 
-- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a Google Cloud Platform (GCP) project](cloudknox-onboard-gcp.md). -- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](cloudknox-onboard-enable-controller-after-onboarding.md). -- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](cloudknox-onboard-add-account-after-onboarding.md). +- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](onboard-azure.md). +- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a Google Cloud Platform (GCP) project](onboard-gcp.md). +- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](onboard-enable-controller-after-onboarding.md). +- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](onboard-add-account-after-onboarding.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-azure.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-azure.md similarity index 54% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-azure.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/onboard-azure.md index 939c093c9b3a..9b21f89b3dbc 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-azure.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-azure.md @@ -1,6 +1,6 @@ --- -title: Onboard a Microsoft Azure subscription in CloudKnox Permissions Management -description: How to a Microsoft Azure subscription on CloudKnox Permissions Management. +title: Onboard a Microsoft Azure subscription in Permissions Management +description: How to onboard a Microsoft Azure subscription on Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,74 +15,74 @@ ms.author: kenwith # Onboard a Microsoft Azure subscription > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management (Permissions Management) is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). -This article describes how to onboard a Microsoft Azure subscription or subscriptions on CloudKnox Permissions Management (CloudKnox). 
Onboarding a subscription creates a new authorization system to represent the Azure subscription in CloudKnox. +This article describes how to onboard a Microsoft Azure subscription or subscriptions on Permissions Management. Onboarding a subscription creates a new authorization system to represent the Azure subscription in Permissions Management. -> [!NOTE] -> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable CloudKnox on your Azure Active Directory tenant](cloudknox-onboard-enable-tenant.md). +> [!NOTE] +> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable Permissions Management on your Azure Active Directory tenant](onboard-enable-tenant.md). ## Prerequisites -To add CloudKnox to your Azure AD tenant: +To add Permissions Management to your Azure AD tenant: - You must have an Azure AD user account and an Azure command-line interface (Azure CLI) on your system, or an Azure subscription. If you don't already have one, [create a free account](https://azure.microsoft.com/free/). - You must have **Microsoft.Authorization/roleAssignments/write** permission at the subscription or management group scope to perform these tasks. If you don't have this permission, you can ask someone who has this permission to perform these tasks for you. -## View a training video on enabling CloudKnox in your Azure AD tenant +## View a training video on enabling Permissions Management in your Azure AD tenant -To view a video on how to enable CloudKnox in your Azure AD tenant, select [Enable CloudKnox in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). 
+To view a video on how to enable Permissions Management in your Azure AD tenant, select [Enable Permissions Management in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). ## How to onboard an Azure subscription -1. If the **Data Collectors** dashboard isn't displayed when CloudKnox launches: +1. If the **Data Collectors** dashboard isn't displayed when Permissions Management launches: - - In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. + - In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. 1. On the **Data Collectors** dashboard, select **Azure**, and then select **Create Configuration**. ### 1. Add Azure subscription details -1. On the **CloudKnox Onboarding - Azure Subscription Details** page, enter the **Subscription IDs** that you want to onboard. - - > [!NOTE] +1. On the **Permissions Management Onboarding - Azure Subscription Details** page, enter the **Subscription IDs** that you want to onboard. + + > [!NOTE] > To locate the Azure subscription IDs, open the **Subscriptions** page in Azure. > You can enter up to 10 subscriptions IDs. Select the plus sign **(+)** icon next to the text box to enter more subscriptions. -1. From the **Scope** dropdown, select **Subscription** or **Management Group**. The script box displays the role assignment script. - - > [!NOTE] - > Select **Subscription** if you want to assign permissions separately for each individual subscription. The generated script has to be executed once per subscription. +1. From the **Scope** dropdown, select **Subscription** or **Management Group**. The script box displays the role assignment script. + + > [!NOTE] + > Select **Subscription** if you want to assign permissions separately for each individual subscription. The generated script has to be executed once per subscription. 
> Select **Management Group** if all of your subscriptions are under one management group. The generated script must be executed once for the management group. -1. To give this role assignment to the service principal, copy the script to a file on your system where Azure CLI is installed and execute it. +1. To give this role assignment to the service principal, copy the script to a file on your system where Azure CLI is installed and execute it. You can execute the script once for each subscription, or once for all the subscriptions in the management group. 1. From the **Enable Controller** dropdown, select: - - **True**, if you want the controller to provide CloudKnox with read and write access so that any remediation you want to do from the CloudKnox platform can be done automatically. - - **False**, if you want the controller to provide CloudKnox with read-only access. + - **True**, if you want the controller to provide Permissions Management with read and write access so that any remediation you want to do from the Permissions Management platform can be done automatically. + - **False**, if you want the controller to provide Permissions Management with read-only access. -1. Return to **CloudKnox Onboarding - Azure Subscription Details** page and select **Next**. +1. Return to **Permissions Management Onboarding - Azure Subscription Details** page and select **Next**. ### 2. Review and save. -- In **CloudKnox Onboarding – Summary** page, review the information you’ve added, and then select **Verify Now & Save**. +- In **Permissions Management Onboarding – Summary** page, review the information you've added, and then select **Verify Now & Save**. The following message appears: **Successfully Created Configuration.** - On the **Data Collectors** tab, the **Recently Uploaded On** column displays **Collecting**. 
The **Recently Transformed On** column displays **Processing.** + On the **Data Collectors** tab, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** - You have now completed onboarding Azure, and CloudKnox has started collecting and processing your data. + You have now completed onboarding Azure, and Permissions Management has started collecting and processing your data. ### 3. View the data. -- To view the data, select the **Authorization Systems** tab. +- To view the data, select the **Authorization Systems** tab. The **Status** column in the table displays **Collecting Data.** @@ -91,9 +91,9 @@ To view a video on how to enable CloudKnox in your Azure AD tenant, select [Enab ## Next steps -- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an Amazon Web Services (AWS) account](cloudknox-onboard-aws.md). -- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a Google Cloud Platform (GCP) project](cloudknox-onboard-gcp.md). -- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](cloudknox-onboard-enable-controller-after-onboarding.md). -- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](cloudknox-onboard-add-account-after-onboarding.md). -- For an overview on CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md). -- For information on how to start viewing information about your authorization system in CloudKnox, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). \ No newline at end of file +- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an Amazon Web Services (AWS) account](onboard-aws.md). 
+- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a Google Cloud Platform (GCP) project](onboard-gcp.md). +- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](onboard-enable-controller-after-onboarding.md). +- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](onboard-add-account-after-onboarding.md). +- For an overview on Permissions Management, see [What's Permissions Management?](overview.md). +- For information on how to start viewing information about your authorization system in Permissions Management, see [View key statistics and data about your authorization system](ui-dashboard.md). \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-controller-after-onboarding.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-controller-after-onboarding.md similarity index 57% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-controller-after-onboarding.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-controller-after-onboarding.md index 2e380779c657..f8fa037bb911 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-enable-controller-after-onboarding.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-controller-after-onboarding.md @@ -1,6 +1,6 @@ --- -title: Enable or disable the controller in Microsoft CloudKnox Permissions Management after onboarding is complete -description: How to enable or disable the controller in Microsoft CloudKnox Permissions Management after onboarding is complete. 
+title: Enable or disable the controller in Permissions Management after onboarding is complete +description: How to enable or disable the controller in Permissions Management after onboarding is complete. services: active-directory author: kenwith manager: rkarlin @@ -15,7 +15,7 @@ ms.author: kenwith # Enable or disable the controller after onboarding is complete > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. This article describes how to enable or disable the controller in Microsoft Azure and Google Cloud Platform (GCP) after onboarding is complete. @@ -24,30 +24,30 @@ This article also describes how to enable the controller in Amazon Web Services ## Enable the controller in AWS -> [!NOTE] +> [!NOTE] > You can only enable the controller in AWS; you can't disable it at this time. -1. Sign in to the AWS console of the member account in a separate browser window. -1. Go to the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. +1. Sign in to the AWS console of the member account in a separate browser window. +1. Go to the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. 1. On the **Data Collectors** dashboard, select **AWS**, and then select **Create Configuration**. -1. On the **CloudKnox Onboarding - AWS Member Account Details** page, select **Launch Template**. +1. On the **Permissions Management Onboarding - AWS Member Account Details** page, select **Launch Template**. The **AWS CloudFormation create stack** page opens, displaying the template. -1. In the **CloudTrailBucketName** box, enter a name. +1. 
In the **CloudTrailBucketName** box, enter a name. You can copy and paste the **CloudTrailBucketName** name from the **Trails** page in AWS. - > [!NOTE] - > A *cloud bucket* collects all the activity in a single account that CloudKnox monitors. Enter the name of a cloud bucket here to provide CloudKnox with the access required to collect activity data. + > [!NOTE] + > A *cloud bucket* collects all the activity in a single account that Permissions Management monitors. Enter the name of a cloud bucket here to provide Permissions Management with the access required to collect activity data. -1. In the **EnableController** box, from the drop-down list, select **True** to provide CloudKnox with read and write access so that any remediation you want to do from the CloudKnox platform can be done automatically. +1. In the **EnableController** box, from the drop-down list, select **True** to provide Permissions Management with read and write access so that any remediation you want to do from the Permissions Management platform can be done automatically. 1. Scroll to the bottom of the page, and in the **Capabilities** box and select **I acknowledge that AWS CloudFormation might create IAM resources with custom names**. Then select **Create stack**. - This AWS CloudFormation stack creates a collection role in the member account with necessary permissions (policies) for data collection. A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. + This AWS CloudFormation stack creates a collection role in the member account with necessary permissions (policies) for data collection. A trust policy is set on this role to allow the OIDC role created in your AWS OIDC account to access it. These entities are listed in the **Resources** tab of your CloudFormation stack. -1. 
Return to CloudKnox, and on the CloudKnox **Onboarding - AWS Member Account Details** page, select **Next**. -1. On **CloudKnox Onboarding – Summary** page, review the information you’ve added, and then select **Verify Now & Save**. +1. Return to Permissions Management, and on the Permissions Management **Onboarding - AWS Member Account Details** page, select **Next**. +1. On **Permissions Management Onboarding – Summary** page, review the information you've added, and then select **Verify Now & Save**. The following message appears: **Successfully created configuration.** @@ -65,39 +65,38 @@ This article also describes how to enable the controller in Amazon Web Services 1. To add the administrative role assignment, return to the **Access control (IAM)** page, and then select **Add role assignment**. 1. Add or remove the role assignment for Cloud Infrastructure Entitlement Management. -1. Go to the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. +1. Go to the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. 1. On the **Data Collectors** dashboard, select **Azure**, and then select **Create Configuration**. -1. On the **CloudKnox Onboarding - Azure Subscription Details** page, enter the **Subscription ID**, and then select **Next**. -1. On **CloudKnox Onboarding – Summary** page, review the controller permissions, and then select **Verify Now & Save**. +1. On the **Permissions Management Onboarding - Azure Subscription Details** page, enter the **Subscription ID**, and then select **Next**. +1. On **Permissions Management Onboarding – Summary** page, review the controller permissions, and then select **Verify Now & Save**. The following message appears: **Successfully Created Configuration.** ## Enable or disable the controller in GCP -1. Execute the **gcloud auth login**. +1. Execute the **gcloud auth login**. 1. 
Follow the instructions displayed on the screen to authorize access to your Google account. 1. Execute the **sh mciem-workload-identity-pool.sh** to create the workload identity pool, provider, and service account. -1. Execute the **sh mciem-member-projects.sh** to give CloudKnox permissions to access each of the member projects. +1. Execute the **sh mciem-member-projects.sh** to give Permissions Management permissions to access each of the member projects. - - If you want to manage permissions through CloudKnox, select **Y** to **Enable controller**. + - If you want to manage permissions through Permissions Management, select **Y** to **Enable controller**. - If you want to onboard your projects in read-only mode, select **N** to **Disable controller**. 1. Optionally, execute **mciem-enable-gcp-api.sh** to enable all recommended GCP APIs. -1. Go to the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. +1. Go to the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. 1. On the **Data Collectors** dashboard, select **GCP**, and then select **Create Configuration**. -1. On the **CloudKnox Onboarding - Azure AD OIDC App Creation** page, select **Next**. -1. On the **CloudKnox Onboarding - GCP OIDC Account Details & IDP Access** page, enter the **OIDC Project Number** and **OIDC Project ID**, and then select **Next**. -1. On the **CloudKnox Onboarding - GCP Project IDs** page, enter the **Project IDs**, and then select **Next**. -1. On the **CloudKnox Onboarding – Summary** page, review the information you’ve added, and then select **Verify Now & Save**. +1. On the **Permissions Management Onboarding - Azure AD OIDC App Creation** page, select **Next**. +1. On the **Permissions Management Onboarding - GCP OIDC Account Details & IDP Access** page, enter the **OIDC Project Number** and **OIDC Project ID**, and then select **Next**. +1. 
On the **Permissions Management Onboarding - GCP Project IDs** page, enter the **Project IDs**, and then select **Next**. +1. On the **Permissions Management Onboarding – Summary** page, review the information you've added, and then select **Verify Now & Save**. The following message appears: **Successfully Created Configuration.** ## Next steps -- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an AWS account](cloudknox-onboard-aws.md). -- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](cloudknox-onboard-azure.md). -- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a GCP project](cloudknox-onboard-gcp.md). -- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](cloudknox-onboard-add-account-after-onboarding.md). - +- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an AWS account](onboard-aws.md). +- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](onboard-azure.md). +- For information on how to onboard a Google Cloud Platform (GCP) project, see [Onboard a GCP project](onboard-gcp.md). +- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](onboard-add-account-after-onboarding.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-tenant.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-tenant.md new file mode 100644 index 000000000000..3bae1ac5a586 --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-enable-tenant.md @@ -0,0 +1,112 @@ +--- +title: Enable Permissions Management in your organization +description: How to enable Permissions Management in your organization. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 04/20/2022 +ms.author: kenwith +--- + +# Enable Permissions Management in your organization + +> [!IMPORTANT] +> Microsoft Entra Permissions Management is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + + +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). + + + +This article describes how to enable Permissions Management in your organization. Once you've enabled Permissions Management, you can connect it to your Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) platforms. + +> [!NOTE] +> To complete this task, you must have *global administrator* permissions as a user in that tenant. You can't enable Permissions Management as a user from another tenant who has signed in via B2B or via Azure Lighthouse. + +## Prerequisites + +To enable Permissions Management in your organization: + +- You must have an Azure AD tenant. If you don't already have one, [create a free account](https://azure.microsoft.com/free/). 
+- You must be eligible for or have an active assignment to the global administrator role as a user in that tenant. + +> [!NOTE] +> During public preview, Permissions Management doesn't perform a license check. + +## View a training video on enabling Permissions Management + +- To view a video on how to enable Permissions Management in your Azure AD tenant, select [Enable Permissions Management in your Azure AD tenant](https://www.youtube.com/watch?v=-fkfeZyevoo). +- To view a video on how to configure and onboard AWS accounts in Permissions Management, select [Configure and onboard AWS accounts](https://www.youtube.com/watch?v=R6K21wiWYmE). +- To view a video on how to configure and onboard GCP accounts in Permissions Management, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). + + +## How to enable Permissions Management on your Azure AD tenant + +1. In your browser: + 1. Go to [Azure services](https://portal.azure.com) and use your credentials to sign in to [Azure Active Directory](https://ms.portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview). + 1. If you aren't already authenticated, sign in as a global administrator user. + 1. If needed, activate the global administrator role in your Azure AD tenant. + 1. In the Azure AD portal, select **Features highlights**, and then select **Permissions Management**. + + 1. If you're prompted to select a sign in account, sign in as a global administrator for a specified tenant. + + The **Welcome to Permissions Management** screen appears, displaying information on how to enable Permissions Management on your tenant. + +1. To provide access to the Permissions Management application, create a service principal. + + An Azure service principal is a security identity used by user-created apps, services, and automation tools to access specific Azure resources. 
+ + > [!NOTE] + > To complete this step, you must have Azure CLI or Azure PowerShell on your system, or an Azure subscription where you can run Cloud Shell. + + - To create a service principal that points to the Permissions Management application via Cloud Shell: + + 1. Copy the script on the **Welcome** screen: + + `az ad sp create --id b46c3ac5-9da6-418f-a849-0a07a10b3c6c` + + 1. If you have an Azure subscription, return to the Azure AD portal and select **Cloud Shell** on the navigation bar. + If you don't have an Azure subscription, open a command prompt on a Windows Server. + 1. If you have an Azure subscription, paste the script into Cloud Shell and press **Enter**. + + - For information on how to create a service principal through the Azure portal, see [Create an Azure service principal with the Azure CLI](/cli/azure/create-an-azure-service-principal-azure-cli). + + - For information on the **az** command and how to sign in with the no subscriptions flag, see [az login](/cli/azure/reference-index?view=azure-cli-latest#az-login&preserve-view=true). + + - For information on how to create a service principal via Azure PowerShell, see [Create an Azure service principal with Azure PowerShell](/powershell/azure/create-azure-service-principal-azureps?view=azps-7.1.0&preserve-view=true). + + 1. After the script runs successfully, the service principal attributes for Permissions Management display. Confirm the attributes. + + The **Cloud Infrastructure Entitlement Management** application displays in the Azure AD portal under **Enterprise applications**. + +1. Return to the **Welcome to Permissions Management** screen and select **Enable Permissions Management**. + + You have now completed enabling Permissions Management on your tenant. Permissions Management launches with the **Data Collectors** dashboard. 
+ +## Configure data collection settings + +Use the **Data Collectors** dashboard in Permissions Management to configure data collection settings for your authorization system. + +1. If the **Data Collectors** dashboard isn't displayed when Permissions Management launches: + + - In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. + +1. Select the authorization system you want: **AWS**, **Azure**, or **GCP**. + +1. For information on how to onboard an AWS account, Azure subscription, or GCP project into Permissions Management, select one of the following articles and follow the instructions: + + - [Onboard an AWS account](onboard-aws.md) + - [Onboard an Azure subscription](onboard-azure.md) + - [Onboard a GCP project](onboard-gcp.md) + +## Next steps + +- For an overview of Permissions Management, see [What's Permissions Management?](overview.md) +- For a list of frequently asked questions (FAQs) about Permissions Management, see [FAQs](faqs.md). +- For information on how to start viewing information about your authorization system in Permissions Management, see [View key statistics and data about your authorization system](ui-dashboard.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-gcp.md b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-gcp.md similarity index 61% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-gcp.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/onboard-gcp.md index 8b894aea37e7..f811ac098cdd 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-onboard-gcp.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/onboard-gcp.md @@ -1,6 +1,6 @@ --- -title: Onboard a Google Cloud Platform (GCP) project in CloudKnox Permissions Management -description: How to onboard a Google Cloud Platform (GCP) project on CloudKnox Permissions Management. +title: Onboard a Google Cloud Platform (GCP) project in Permissions Management +description: How to onboard a Google Cloud Platform (GCP) project on Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,106 +15,106 @@ ms.author: kenwith # Onboard a Google Cloud Platform (GCP) project > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). +> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). -This article describes how to onboard a Google Cloud Platform (GCP) project on CloudKnox Permissions Management (CloudKnox). 
+This article describes how to onboard a Google Cloud Platform (GCP) project on Permissions Management. > [!NOTE] -> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable CloudKnox on your Azure Active Directory tenant](cloudknox-onboard-enable-tenant.md). +> A *global administrator* or *super admin* (an admin for all authorization system types) can perform the tasks in this article after the global administrator has initially completed the steps provided in [Enable Permissions Management on your Azure Active Directory tenant](onboard-enable-tenant.md). ## View a training video on configuring and onboarding a GCP account -To view a video on how to configure and onboard GCP accounts in CloudKnox, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). +To view a video on how to configure and onboard GCP accounts in Permissions Management, select [Configure and onboard GCP accounts](https://www.youtube.com/watch?app=desktop&v=W3epcOaec28). ## Onboard a GCP project -1. If the **Data Collectors** dashboard isn't displayed when CloudKnox launches: +1. If the **Data Collectors** dashboard isn't displayed when Permissions Management launches: - - In the CloudKnox home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. + - In the Permissions Management home page, select **Settings** (the gear icon), and then select the **Data Collectors** subtab. 1. On the **Data Collectors** tab, select **GCP**, and then select **Create Configuration**. ### 1. Create an Azure AD OIDC app. -1. On the **CloudKnox Onboarding - Azure AD OIDC App Creation** page, enter the **OIDC Azure App Name**. +1. On the **Permissions Management Onboarding - Azure AD OIDC App Creation** page, enter the **OIDC Azure App Name**. 
This app is used to set up an OpenID Connect (OIDC) connection to your GCP project. OIDC is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. The scripts generated will create the app of this specified name in your Azure AD tenant with the right configuration. - + 1. To create the app registration, copy the script and run it in your command-line app. - > [!NOTE] + > [!NOTE] > 1. To confirm that the app was created, open **App registrations** in Azure and, on the **All applications** tab, locate your app. > 1. Select the app name to open the **Expose an API** page. The **Application ID URI** displayed in the **Overview** page is the *audience value* used while making an OIDC connection with your AWS account. - 1. Return to CloudKnox, and in the **CloudKnox Onboarding - Azure AD OIDC App Creation**, select **Next**. - + 1. Return to Permissions Management, and in the **Permissions Management Onboarding - Azure AD OIDC App Creation**, select **Next**. + ### 2. Set up a GCP OIDC project. -1. In the **CloudKnox Onboarding - GCP OIDC Account Details & IDP Access** page, enter the **OIDC Project ID** and **OIDC Project Number** of the GCP project in which the OIDC provider and pool will be created. You can change the role name to your requirements. +1. In the **Permissions Management Onboarding - GCP OIDC Account Details & IDP Access** page, enter the **OIDC Project ID** and **OIDC Project Number** of the GCP project in which the OIDC provider and pool will be created. You can change the role name to your requirements. - > [!NOTE] + > [!NOTE] > You can find the **Project number** and **Project ID** of your GCP project on the GCP **Dashboard** page of your project in the **Project info** panel. 1. You can change the **OIDC Workload Identity Pool Id**, **OIDC Workload Identity Pool Provider Id** and **OIDC Service Account Name** to meet your requirements. 
Optionally, specify **G-Suite IDP Secret Name** and **G-Suite IDP User Email** to enable G-Suite integration. - You can either download and run the script at this point or you can do it in the Google Cloud Shell, as described [later in this article](cloudknox-onboard-gcp.md#4-run-scripts-in-cloud-shell-optional-if-not-already-executed). + You can either download and run the script at this point or you can do it in the Google Cloud Shell, as described [later in this article](onboard-gcp.md#4-run-scripts-in-cloud-shell-optional-if-not-already-executed). 1. Select **Next**. ### 3. Set up GCP member projects. -1. In the **CloudKnox Onboarding - GCP Project Ids** page, enter the **Project IDs**. +1. In the **Permissions Management Onboarding - GCP Project Ids** page, enter the **Project IDs**. You can enter up to 10 GCP project IDs. Select the plus icon next to the text box to insert more project IDs. - -1. You can choose to download and run the script at this point, or you can do it via Google Cloud Shell, as described in the [next step](cloudknox-onboard-gcp.md#4-run-scripts-in-cloud-shell-optional-if-not-already-executed). - + +1. You can choose to download and run the script at this point, or you can do it via Google Cloud Shell, as described in the [next step](onboard-gcp.md#4-run-scripts-in-cloud-shell-optional-if-not-already-executed). + ### 4. Run scripts in Cloud Shell. (Optional if not already executed) -1. In the **CloudKnox Onboarding - GCP Project Ids** page, select **Launch SSH**. +1. In the **Permissions Management Onboarding - GCP Project Ids** page, select **Launch SSH**. 1. To copy all your scripts into your current directory, in **Open in Cloud Shell**, select **Trust repo**, and then select **Confirm**. The Cloud Shell provisions the Cloud Shell machine and makes a connection to your Cloud Shell instance. - > [!NOTE] + > [!NOTE] > Follow the instructions in the browser as they may be different from the ones given here. 
- The **Welcome to CloudKnox GCP onboarding** screen appears, displaying steps you must complete to onboard your GCP project. + The **Welcome to Permissions Management GCP onboarding** screen appears, displaying steps you must complete to onboard your GCP project. -### 5. Paste the environment vars from the CloudKnox portal. +### 5. Paste the environment vars from the Permissions Management portal. -1. Return to CloudKnox and select **Copy export variables**. +1. Return to Permissions Management and select **Copy export variables**. 1. In the GCP Onboarding shell editor, paste the variables you copied, and then press **Enter**. -1. Execute the **gcloud auth login**. +1. Execute the **gcloud auth login**. 1. Follow instructions displayed on the screen to authorize access to your Google account. 1. Execute the **sh mciem-workload-identity-pool.sh** to create the workload identity pool, provider, and service account. -1. Execute the **sh mciem-member-projects.sh** to give CloudKnox permissions to access each of the member projects. +1. Execute the **sh mciem-member-projects.sh** to give Permissions Management permissions to access each of the member projects. - - If you want to manage permissions through CloudKnox, select **Y** to **Enable controller**. + - If you want to manage permissions through Permissions Management, select **Y** to **Enable controller**. - If you want to onboard your projects in read-only mode, select **N** to **Disable controller**. 1. Optionally, execute **mciem-enable-gcp-api.sh** to enable all recommended GCP APIs. -1. Return to **CloudKnox Onboarding - GCP Project Ids**, and then select **Next**. +1. Return to **Permissions Management Onboarding - GCP Project Ids**, and then select **Next**. ### 6. Review and save. -1. In the **CloudKnox Onboarding – Summary** page, review the information you’ve added, and then select **Verify Now & Save**. +1. 
In the **Permissions Management Onboarding – Summary** page, review the information you've added, and then select **Verify Now & Save**. The following message appears: **Successfully Created Configuration.** - On the **Data Collectors** tab, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** + On the **Data Collectors** tab, the **Recently Uploaded On** column displays **Collecting**. The **Recently Transformed On** column displays **Processing.** - You have now completed onboarding GCP, and CloudKnox has started collecting and processing your data. + You have now completed onboarding GCP, and Permissions Management has started collecting and processing your data. ### 7. View the data. @@ -128,7 +128,7 @@ To view a video on how to configure and onboard GCP accounts in CloudKnox, selec ## Next steps -- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an Amazon Web Services (AWS) account](cloudknox-onboard-aws.md). -- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](cloudknox-onboard-azure.md). -- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](cloudknox-onboard-enable-controller-after-onboarding.md). -- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](cloudknox-onboard-add-account-after-onboarding.md). +- For information on how to onboard an Amazon Web Services (AWS) account, see [Onboard an Amazon Web Services (AWS) account](onboard-aws.md). +- For information on how to onboard a Microsoft Azure subscription, see [Onboard a Microsoft Azure subscription](onboard-azure.md). 
+- For information on how to enable or disable the controller after onboarding is complete, see [Enable or disable the controller](onboard-enable-controller-after-onboarding.md). +- For information on how to add an account/subscription/project after onboarding is complete, see [Add an account/subscription/project after onboarding is complete](onboard-add-account-after-onboarding.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-overview.md b/articles/active-directory/cloud-infrastructure-entitlement-management/overview.md similarity index 60% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-overview.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/overview.md index cac6d12faa32..67286f887251 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-overview.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/overview.md @@ -1,6 +1,6 @@ --- -title: What's CloudKnox Permissions Management? -description: An introduction to CloudKnox Permissions Management. +title: What's Permissions Management? +description: An introduction to Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -12,41 +12,41 @@ ms.date: 04/20/2022 ms.author: kenwith --- -# What's CloudKnox Permissions Management? +# What's Permissions Management? > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -> [!NOTE] -> The CloudKnox Permissions Management (CloudKnox) PREVIEW is currently not available for tenants hosted in the European Union (EU). 
+> [!NOTE] +> The Permissions Management PREVIEW is currently not available for tenants hosted in the European Union (EU). ## Overview -CloudKnox Permissions Management (CloudKnox) is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities. For example, over-privileged workload and user identities, actions, and resources across multi-cloud infrastructures in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). +Permissions Management is a cloud infrastructure entitlement management (CIEM) solution that provides comprehensive visibility into permissions assigned to all identities. For example, over-privileged workload and user identities, actions, and resources across multi-cloud infrastructures in Microsoft Azure, Amazon Web Services (AWS), and Google Cloud Platform (GCP). -CloudKnox detects, automatically right-sizes, and continuously monitors unused and excessive permissions. +Permissions Management detects, automatically right-sizes, and continuously monitors unused and excessive permissions. -Organizations have to consider permissions management as a central piece of their Zero Trust security to implement least privilege access across their entire infrastructure: +Organizations have to consider permissions management as a central piece of their Zero Trust security to implement least privilege access across their entire infrastructure: - Organizations are increasingly adopting multi-cloud strategy and are struggling with the lack of visibility and the increasing complexity of managing access permissions. - With the proliferation of identities and cloud services, the number of high-risk cloud permissions is exploding, expanding the attack surface for organizations. - IT security teams are under increased pressure to ensure access to their expanding cloud estate is secure and compliant. 
-- The inconsistency of cloud providers’ native access management models makes it even more complex for Security and Identity to manage permissions and enforce least privilege access policies across their entire environment. +- The inconsistency of cloud providers' native access management models makes it even more complex for Security and Identity to manage permissions and enforce least privilege access policies across their entire environment. :::image type="content" source="media/cloudknox-overview/cloudknox-key-cases.png" alt-text="CloudKnox Permissions Management."::: ## Key use cases - -CloudKnox allows customers to address three key use cases: *discover*, *remediate*, and *monitor*. + +Permissions Management allows customers to address three key use cases: *discover*, *remediate*, and *monitor*. ### Discover Customers can assess permission risks by evaluating the gap between permissions granted and permissions used. - Cross-cloud permissions discovery: Granular and normalized metrics for key cloud platforms: AWS, Azure, and GCP. -- Permission Creep Index (PCI): An aggregated metric that periodically evaluates the level of risk associated with the number of unused or excessive permissions across your identities and resources. It measures how much damage identities can cause based on the permissions they have. +- Permission Creep Index (PCI): An aggregated metric that periodically evaluates the level of risk associated with the number of unused or excessive permissions across your identities and resources. It measures how much damage identities can cause based on the permissions they have. - Permission usage analytics: Multi-dimensional view of permissions risk for all identities, actions, and resources. ### Remediate @@ -64,15 +64,15 @@ Customers can detect anomalous activities with machine language-powered (ML-powe - ML-powered anomaly detections. 
- Context-rich forensic reports around identities, actions, and resources to support rapid investigation and remediation. -CloudKnox deepens Zero Trust security strategies by augmenting the least privilege access principle, allowing customers to: +Permissions Management deepens Zero Trust security strategies by augmenting the least privilege access principle, allowing customers to: -- Get comprehensive visibility: Discover which identity is doing what, where, and when. -- Automate least privilege access: Use access analytics to ensure identities have the right permissions, at the right time. -- Unify access policies across infrastructure as a service (IaaS) platforms: Implement consistent security policies across your cloud infrastructure. +- Get comprehensive visibility: Discover which identity is doing what, where, and when. +- Automate least privilege access: Use access analytics to ensure identities have the right permissions, at the right time. +- Unify access policies across infrastructure as a service (IaaS) platforms: Implement consistent security policies across your cloud infrastructure. ## Next steps -- For information on how to onboard CloudKnox in your organization, see [Enable CloudKnox in your organization](cloudknox-onboard-enable-tenant.md). -- For a list of frequently asked questions (FAQs) about CloudKnox, see [FAQs](cloudknox-faqs.md). \ No newline at end of file +- For information on how to onboard Permissions Management for your organization, see [Enable Permissions Management in your organization](onboard-enable-tenant.md). +- For a list of frequently asked questions (FAQs) about Permissions Management, see [FAQs](faqs.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-explorer.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-explorer.md similarity index 80% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-explorer.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-account-explorer.md index 3ee999fbceb9..d36ed904a965 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-explorer.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-explorer.md @@ -1,6 +1,6 @@ --- -title: The CloudKnox Permissions Management - View roles and identities that can access account information from an external account -description: How to view information about identities that can access accounts from an external account in CloudKnox Permissions Management. +title: View roles and identities that can access account information from an external account +description: How to view information about identities that can access accounts from an external account in Permissions Management. services: active-directory manager: rkarlin ms.service: active-directory @@ -13,14 +13,14 @@ ms.author: kenwith # View roles and identities that can access account information from an external account > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -You can view information about users, groups, and resources that can access account information from an external account in CloudKnox Permissions Management (CloudKnox). 
+You can view information about users, groups, and resources that can access account information from an external account in Permissions Management. ## Display information about users, groups, or tasks -1. In CloudKnox, select the **Usage analytics** tab, and then, from the dropdown, select one of the following: +1. In Permissions Management, select the **Usage analytics** tab, and then, from the dropdown, select one of the following: - **Users** - **Group** @@ -31,8 +31,8 @@ You can view information about users, groups, and resources that can access acco 1. To choose an account from your authorization system, select the lock icon in the left panel. 1. In the **Authorization systems** pane, select an account, then select **Apply**. -1. To choose a user, role, or group, select the person icon. -1. Select a user or group, then select **Apply**. +1. To choose a user, role, or group, select the person icon. +1. Select a user or group, then select **Apply**. 1. To choose an account from your authorization system, select it from the Authorization Systems menu. 1. In the user type filter, user, role, or group. 1. In the **Task** filter, select **All** or **High-risk tasks**, then select **Apply**. @@ -53,7 +53,7 @@ To export the data in comma-separated values (CSV) file format, select **Export* 1. To view all the identities from various accounts that can assume this role, select the down arrow to the left of the role name. 1. To view a graph of all the identities that can access the specified account and through which role(s), select the role name. - If CloudKnox is monitoring the external account, it lists specific identities from the accounts that can assume this role. Otherwise, it lists the identities declared in the **Trusted entity** section. + If Permissions Management is monitoring the external account, it lists specific identities from the accounts that can assume this role. Otherwise, it lists the identities declared in the **Trusted entity** section. 
**Connecting roles**: Lists the following roles for each account: - *Direct roles* that are trusted by the account role. @@ -62,7 +62,7 @@ To export the data in comma-separated values (CSV) file format, select **Export* 1. To view all the roles from that account that are used to access the specified account, select the down arrow to the left of the account name. 1. To view the trusted identities declared by the role, select the down arrow to the left of the role name. - The trusted identities for the role are listed only if the account is being monitored by CloudKnox. + The trusted identities for the role are listed only if the account is being monitored by Permissions Management. 1. To view the role definition, select the "eye" icon to the right of the role name. @@ -75,4 +75,4 @@ To export the data in comma-separated values (CSV) file format, select **Export* 1. The **Info** tab displays the **Privilege creep index** and **Service control policy (SCP)** information about the account. -For more information about the **Privilege creep index** and SCP information, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). +For more information about the **Privilege creep index** and SCP information, see [View key statistics and data about your authorization system](ui-dashboard.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-settings.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-settings.md similarity index 61% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-settings.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-account-settings.md index 5ab6917745ce..7219ed8d1fb9 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-account-settings.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-account-settings.md @@ -1,6 +1,6 @@ --- -title: View personal and organization information in CloudKnox Permissions Management -description: How to view personal and organization information in the Account settings dashboard in CloudKnox Permissions Management. +title: View personal and organization information in Permissions Management +description: How to view personal and organization information in the Account settings dashboard in Permissions Management. services: active-directory manager: rkarlin ms.service: active-directory @@ -13,21 +13,21 @@ ms.author: kenwith # View personal and organization information > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Account settings** dashboard in CloudKnox Permissions Management (CloudKnox) allows you to view personal information, passwords, and account preferences. +The **Account settings** dashboard in Permissions Management allows you to view personal information, passwords, and account preferences. 
This information can't be modified because the user information is pulled from Azure AD. Only **User Session Time(min)** ## View personal information -1. In the CloudKnox home page, select the down arrow to the right of the **User** (your initials) menu, and then select **Account Settings**. +1. In the Permissions Management home page, select the down arrow to the right of the **User** (your initials) menu, and then select **Account Settings**. - The **Personal Information** box displays your **First Name**, **Last Name**, and the **Email Address** that was used to register your account on CloudKnox. + The **Personal Information** box displays your **First Name**, **Last Name**, and the **Email Address** that was used to register your account on Permissions Management. ## View current organization information -1. In the CloudKnox home page, select the down arrow to the right of the **User** (your initials) menu, and then select **Account Settings**. +1. In the Permissions Management home page, select the down arrow to the right of the **User** (your initials) menu, and then select **Account Settings**. The **Current Organization Information** displays the **Name** of your organization, the **Tenant ID** box, and the **User Session Timeout (min)**. @@ -37,6 +37,6 @@ This information can't be modified because the user information is pulled from A ## Next steps -- For information about how to manage user information, see [Manage users and groups with the User management dashboard](cloudknox-ui-user-management.md). -- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](cloudknox-ui-tasks.md). -- For information about how to select group-based permissions settings, see [Select group-based permissions settings](cloudknox-howto-create-group-based-permissions.md). 
+- For information about how to manage user information, see [Manage users and groups with the User management dashboard](ui-user-management.md). +- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](ui-tasks.md). +- For information about how to select group-based permissions settings, see [Select group-based permissions settings](how-to-create-group-based-permissions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-audit-trail.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-audit-trail.md similarity index 83% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-audit-trail.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-audit-trail.md index ef3ea798af79..fc0679b50dab 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-audit-trail.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-audit-trail.md @@ -1,6 +1,6 @@ --- -title: Filter and query user activity in CloudKnox Permissions Management -description: How to filter and query user activity in CloudKnox Permissions Management. +title: Filter and query user activity in Permissions Management +description: How to filter and query user activity in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,27 +15,27 @@ ms.author: kenwith # Filter and query user activity > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
-The **Audit** dashboard in CloudKnox Permissions Management (CloudKnox) details all user activity performed in your authorization system. It captures all high risk activity in a centralized location, and allows system administrators to query the logs. The **Audit** dashboard enables you to: +The **Audit** dashboard in Permissions Management details all user activity performed in your authorization system. It captures all high risk activity in a centralized location, and allows system administrators to query the logs. The **Audit** dashboard enables you to: - Create and save new queries so you can access key data points easily. - Query across multiple authorization systems in one query. ## Filter information by authorization system -If you haven't used filters before, the default filter is the first authorization system in the filter list. +If you haven't used filters before, the default filter is the first authorization system in the filter list. If you have used filters before, the default filter is last filter you selected. -1. To display the **Audit** dashboard, on the CloudKnox home page, select **Audit**. +1. To display the **Audit** dashboard, on the Permissions Management home page, select **Audit**. -1. To select your authorization system type, in the **Authorization System Type** box, select Amazon Web Services (**AWS**), Microsoft Azure (**Azure**), Google Cloud Platform (**GCP**), or Platform (**Platform**). +1. To select your authorization system type, in the **Authorization System Type** box, select Amazon Web Services (**AWS**), Microsoft Azure (**Azure**), Google Cloud Platform (**GCP**), or Platform (**Platform**). 1. To select your authorization system, in the **Authorization System** box: - - From the **List** subtab, select the accounts you want to use. + - From the **List** subtab, select the accounts you want to use. - From the **Folders** subtab, select the folders you want to use. 1. To view your query results, select **Apply**. 
@@ -48,7 +48,7 @@ There are several different query parameters you can configure individually or i - To view an existing query, select **View** (the eye icon). - To edit an existing query, select **Edit** (the pencil icon). - To delete a function line in a query, select **Delete** (the minus sign **-** icon). -- To create multiple queries at one time, select **Add New Tab** to the right of the **Query** tabs that are displayed. +- To create multiple queries at one time, select **Add New Tab** to the right of the **Query** tabs that are displayed. You can open a maximum number of six query tab pages at the same time. A message will appear when you've reached the maximum. @@ -69,7 +69,7 @@ There are several different query parameters you can configure individually or i - **Is**: Select this option to choose a specific date from the calendar. - **Custom**: Select this option to set a date range from the **From** and **To** calendars. -1. To run the query on the current selection, select **Search**. +1. To run the query on the current selection, select **Search**. 1. To save your query, select **Save**. @@ -80,7 +80,7 @@ There are several different query parameters you can configure individually or i The **Operator** menu displays the following options depending on the identity you select in the first dropdown: - **Is** / **Is Not**: View a list of all available usernames. You can either select or enter a username in the box. -- **Contains** / **Not Contains**: Enter text that the **Username** should or shouldn't contain, for example, *CloudKnox*. +- **Contains** / **Not Contains**: Enter text that the **Username** should or shouldn't contain, for example, *Permissions Management*. - **In** / **Not In**: View a list all available usernames and select multiple usernames. 
### Create a query with a username @@ -95,11 +95,11 @@ The **Operator** menu displays the following options depending on the identity y You can change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with the username **Test**. -1. Select the plus (**+**) sign, select **Or** with **Contains**, and then enter a username, for example, *CloudKnox*. +1. Select the plus (**+**) sign, select **Or** with **Contains**, and then enter a username, for example, *Permissions Management*. 1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). -1. To run the query on the current selection, select **Search**. +1. To run the query on the current selection, select **Search**. 1. To clear the recent selections, select **Reset**. @@ -113,13 +113,13 @@ The **Operator** menu displays the following options depending on the identity y 1. To add criteria to this section, select **Add**. - You can change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with resource name **Test**. + You can change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with resource name **Test**. -1. Select the plus (**+**) sign, select **Or** with **Contains**, and then enter a username, for example, *CloudKnox*. +1. Select the plus (**+**) sign, select **Or** with **Contains**, and then enter a username, for example, *Permissions Management*. 1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). -1. To run the query on the current selection, select **Search**. +1. To run the query on the current selection, select **Search**. 1. To clear the recent selections, select **Reset**. @@ -133,9 +133,9 @@ The **Operator** menu displays the following options depending on the identity y 1. 
To add criteria to this section, select **Add**. -1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with resource type **s3::bucket**. +1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with resource type **s3::bucket**. -1. Select the plus (**+**) sign, select **Or** with **Is**, and then enter or select `ec2::instance`. +1. Select the plus (**+**) sign, select **Or** with **Is**, and then enter or select `ec2::instance`. 1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). @@ -152,15 +152,15 @@ The **Operator** menu displays the following options depending on the identity y 1. From the **Operator** menu, select the required option. -1. To add criteria to this section, select **Add**. +1. To add criteria to this section, select **Add**. -1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with task name **s3:CreateBucket**. +1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with task name **s3:CreateBucket**. -1. Select **Add**, select **Or** with **Is**, and then enter or select `ec2:TerminateInstance`. +1. Select **Add**, select **Or** with **Is**, and then enter or select `ec2:TerminateInstance`. 1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). -1. To run the query on the current selection, select **Search**. +1. To run the query on the current selection, select **Search**. 1. To clear the recent selections, select **Reset**. 
@@ -174,15 +174,15 @@ The **Operator** menu displays the following options depending on the identity y - **Is** / **Is not**: Allows a user to select in the value field and select **Authorization Failure**, **Error**, or **Success**. -1. To add criteria to this section, select **Add**. +1. To add criteria to this section, select **Add**. -1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with State **Authorization Failure**. +1. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** with State **Authorization Failure**. -1. Select the **Add** icon, select **Or** with **Is**, and then select **Success**. +1. Select the **Add** icon, select **Or** with **Is**, and then select **Success**. 1. To remove a row of criteria, select **Remove** (the minus sign **-** icon). -1. To run the query on the current selection, select **Search**. +1. To run the query on the current selection, select **Search**. 1. To clear the recent selections, select **Reset**. @@ -194,15 +194,15 @@ The **Operator** menu displays the following options depending on the identity y 3. From the **Operator** menu, select the required option. -4. To add criteria to this section, select **Add**. +4. To add criteria to this section, select **Add**. -5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free text **Test**. +5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free text **Test**. -6. Select the **Add** icon, select **Or** with **Contains**, and then enter your criteria, for example *CloudKnox*. +6. 
Select the **Add** icon, select **Or** with **Contains**, and then enter your criteria, for example *Permissions Management*. 7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). -8. To run the query on the current selection, select **Search**. +8. To run the query on the current selection, select **Search**. 9. To clear the recent selections, select **Reset**. @@ -214,15 +214,15 @@ The **Operator** menu displays the following options depending on the identity y 3. From the **Operator** menu, select the required option. -4. To add criteria to this section, select **Add**. +4. To add criteria to this section, select **Add**. -5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free text **Test**. +5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free text **Test**. -6. Select the **Add** icon, select **Or** with **Contains**, and then enter your criteria, for example *CloudKnox*. +6. Select the **Add** icon, select **Or** with **Contains**, and then enter your criteria, for example *Permissions Management*. 7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). -8. To run the query on the current selection, select **Search**. +8. To run the query on the current selection, select **Search**. 9. To clear the recent selections, select **Reset**. @@ -234,11 +234,11 @@ The **Operator** menu displays the following options depending on the identity y 3. From the **Operator** menu, select the required option. -4. To add criteria to this section, select **Add**. +4. To add criteria to this section, select **Add**. -5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free `AKIAIFXNDW2Z2MPEH5OQ`. +5. 
Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Contains** with free `AKIAIFXNDW2Z2MPEH5OQ`. -6. Select the **Add** icon, select **Or** with **Not** **Contains**, and then enter `AKIAVP2T3XG7JUZRM7WU`. +6. Select the **Add** icon, select **Or** with **Not** **Contains**, and then enter `AKIAVP2T3XG7JUZRM7WU`. 7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). @@ -256,13 +256,13 @@ The **Operator** menu displays the following options depending on the identity y 4. To add criteria to this section, select **Add**. -5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** and type in, or select **Test**. +5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** and type in, or select **Test**. -6. Select the **Add** icon, select **Or** with **Is**, and then enter your criteria, for example *CloudKnox*. +6. Select the **Add** icon, select **Or** with **Is**, and then enter your criteria, for example *Permissions Management*. 7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). -8. To run the query on the current selection, select **Search**. +8. To run the query on the current selection, select **Search**. 9. To clear the recent selections, select **Reset**. @@ -276,19 +276,19 @@ The **Operator** menu displays the following options depending on the identity y 4. To add criteria to this section, select **Add**. -5. Change the operation between **And** / **Or** statements, and select other criteria. For example, the first set of criteria selected can be **Is** and type in, or select **Test**. +5. Change the operation between **And** / **Or** statements, and select other criteria. 
For example, the first set of criteria selected can be **Is** and type in, or select **Test**. -6. Select the **Add** icon, select **Or** with **Is**, and then enter your criteria, for example *CloudKnox*. +6. Select the **Add** icon, select **Or** with **Is**, and then enter your criteria, for example *Permissions Management*. 7. To remove a row of criteria, select **Remove** (the minus sign **-** icon). -8. To run the query on the current selection, select **Search**. +8. To run the query on the current selection, select **Search**. 9. To clear the recent selections, select **Reset**. ### View query results -1. In the **Activity** table, your query results display in columns. +1. In the **Activity** table, your query results display in columns. The results display all executed tasks that aren't read-only. @@ -300,7 +300,7 @@ The **Operator** menu displays the following options depending on the identity y - **Resource Name**: The name of the resource on which the task is being performed. - If the column displays **Multiple**, it means multiple resources are listed in the column. + If the column displays **Multiple**, it means multiple resources are listed in the column. 1. To view a list of all resources, hover over **Multiple**. @@ -323,22 +323,22 @@ The **Operator** menu displays the following options depending on the identity y 2. In the **Query Name** box, enter a name for your query, and then select **Save**. -3. To save a query with a different name, select the ellipses (**...**) next to **Save**, and then select **Save As**. +3. To save a query with a different name, select the ellipses (**...**) next to **Save**, and then select **Save As**. 4. Make your query selections from the **New Query** section, select the ellipses (**...**), and then select **Save As**. -5. To save a new query, in the **Save Query** box, enter the name for the query, and then select **Save**. +5. 
To save a new query, in the **Save Query** box, enter the name for the query, and then select **Save**. -6. To save an existing query you've modified, select the ellipses (**...**). +6. To save an existing query you've modified, select the ellipses (**...**). - To save a modified query under the same name, select **Save**. - To save a modified query under a different name, select **Save As**. ### View a saved query -1. Select **Saved Queries**, and then select a query from the **Load Queries** list. +1. Select **Saved Queries**, and then select a query from the **Load Queries** list. - A message box opens with the following options: **Load with the saved authorization system** or **Load with the currently selected authorization system**. + A message box opens with the following options: **Load with the saved authorization system** or **Load with the currently selected authorization system**. 1. Select the appropriate option, and then select **Load Queries**. @@ -366,16 +366,16 @@ The **Operator** menu displays the following options depending on the identity y ### Save a query under a different name -- Select the ellipses (**...**). +- Select the ellipses (**...**). System queries have only one option: - **Duplicate**: Creates a duplicate of the query and names the file *Copy of XXX*. - Custom queries have the following options: + Custom queries have the following options: - **Rename**: Enter the new name of the query and select **Save**. - - **Delete**: Delete the saved query. + - **Delete**: Delete the saved query. The **Delete Query** box opens, asking you to confirm that you want to delete the query. Select **Yes** or **No**. @@ -391,11 +391,11 @@ The **Operator** menu displays the following options depending on the identity y - To export the results of the query, select **Export**. - CloudKnox exports the results in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. 
+ Permissions Management exports the results in comma-separated values (**CSV**) format, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) format. ## Next steps -- For information on how to view how users access information, see [Use queries to see how users access information](cloudknox-ui-audit-trail.md). -- For information on how to create a query, see [Create a custom query](cloudknox-howto-create-custom-queries.md). -- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](cloudknox-howto-audit-trail-results.md). +- For information on how to view how users access information, see [Use queries to see how users access information](ui-audit-trail.md). +- For information on how to create a query, see [Create a custom query](how-to-create-custom-queries.md). +- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](how-to-audit-trail-results.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-dashboard.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-dashboard.md similarity index 79% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-dashboard.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-dashboard.md index 48d0653e35e9..7822f837ca11 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-dashboard.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-dashboard.md @@ -1,6 +1,6 @@ --- -title: View data about the activity in your authorization system in CloudKnox Permissions Management -description: How to view data about the activity in your authorization system in the CloudKnox Dashboard in CloudKnox Permissions Management. 
+title: View data about the activity in your authorization system in Permissions Management +description: How to view data about the activity in your authorization system in the Permissions Management Dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -17,35 +17,35 @@ ms.author: kenwith # View data about the activity in your authorization system > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The CloudKnox Permissions Management (CloudKnox) **Dashboard** provides an overview of the authorization system and account activity being monitored. You can use this dashboard to view data collected from your Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP) authorization systems. +The Permissions Management **Dashboard** provides an overview of the authorization system and account activity being monitored. You can use this dashboard to view data collected from your Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP) authorization systems. ## View data about your authorization system -1. In the CloudKnox home page, select **Dashboard**. +1. In the Permissions Management home page, select **Dashboard**. 1. From the **Authorization systems type** dropdown, select **AWS**, **Azure**, or **GCP**. -1. Select the **Authorization System** box to display a **List** of accounts and **Folders** available to you. -1. Select the accounts and folders you want, and then select **Apply**. +1. Select the **Authorization System** box to display a **List** of accounts and **Folders** available to you. +1. Select the accounts and folders you want, and then select **Apply**. 
The **Permission Creep Index (PCI)** chart updates to display information about the accounts and folders you selected. The number of days since the information was last updated displays in the upper right corner. 1. In the Permission Creep Index (PCI) graph, select a bubble. - The bubble displays the number of identities that are considered high-risk. + The bubble displays the number of identities that are considered high-risk. *High-risk* refers to the number of users who have permissions that exceed their normal or required usage. -1. Select the box to display detailed information about the identities contributing to the **Low PCI**, **Medium PCI**, and **High PCI**. +1. Select the box to display detailed information about the identities contributing to the **Low PCI**, **Medium PCI**, and **High PCI**. 1. The **Highest PCI change** displays the authorization system name with the PCI number and the change number for the last seven days, if applicable. - + - To view all the changes and PCI ratings in your authorization system, select **View all**. -1. To return to the PCI graph, select the **Graph** icon in the upper right of the list box. +1. To return to the PCI graph, select the **Graph** icon in the upper right of the list box. -For more information about the CloudKnox **Dashboard**, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). +For more information about the Permissions Management **Dashboard**, see [View key statistics and data about your authorization system](ui-dashboard.md). ## View user data on the PCI heat map @@ -53,7 +53,7 @@ The **Permission Creep Index (PCI)** heat map shows the incurred risk of users w - To view detailed data about a user, select the number. - The PCI trend graph shows you the historical trend of the PCI score over the last 90 days. + The PCI trend graph shows you the historical trend of the PCI score over the last 90 days. 
- To download the **PCI History** report, select **Download** (the down arrow icon). @@ -69,7 +69,7 @@ To view specific information about the following, select the number displayed on ## View identity findings -The **Identity** section below the heat map on the left side of the page shows all the relevant findings about identities, including roles that can access secret information, roles that are inactive, over provisioned active roles, and so on. +The **Identity** section below the heat map on the left side of the page shows all the relevant findings about identities, including roles that can access secret information, roles that are inactive, over provisioned active roles, and so on. - To expand the full list of identity findings, select **All findings**. @@ -79,4 +79,4 @@ The **Resource** section below the heat map on the right side of the page shows ## Next steps -- For more information about how to view key statistics and data in the Dashboard, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). +- For more information about how to view key statistics and data in the Dashboard, see [View key statistics and data about your authorization system](ui-dashboard.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-inventory.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-inventory.md similarity index 68% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-inventory.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-data-inventory.md index 594f7f8b54df..50ad92ce4918 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-inventory.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-inventory.md @@ -1,6 +1,6 @@ --- -title: CloudKnox Permissions Management - Display an inventory of created resources and licenses for your authorization system -description: How to display an inventory of created resources and licenses for your authorization system in CloudKnox Permissions Management. +title: Display an inventory of created resources and licenses for your authorization system +description: How to display an inventory of created resources and licenses for your authorization system in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,15 +15,15 @@ ms.author: kenwith # Display an inventory of created resources and licenses for your authorization system > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -You can use the **Inventory** dashboard in CloudKnox Permissions Management (CloudKnox) to display an inventory of created resources and licensing information for your authorization system and its associated accounts. 
+You can use the **Inventory** dashboard in Permissions Management to display an inventory of created resources and licensing information for your authorization system and its associated accounts. ## View resources created for your authorization system -1. To access your inventory information, in the CloudKnox home page, select **Settings** (the gear icon). -1. Select the **Inventory** tab, select the **Inventory** subtab, and then select your authorization system type: +1. To access your inventory information, in the Permissions Management home page, select **Settings** (the gear icon). +1. Select the **Inventory** tab, select the **Inventory** subtab, and then select your authorization system type: - **AWS** for Amazon Web Services. - **Azure** for Microsoft Azure. @@ -37,7 +37,7 @@ You can use the **Inventory** dashboard in CloudKnox Permissions Management (Clo ## View the number of licenses associated with your authorization system -1. To access licensing information about your data sources, in the CloudKnox home page, select **Settings** (the gear icon). +1. To access licensing information about your data sources, in the Permissions Management home page, select **Settings** (the gear icon). 1. Select the **Inventory** tab, select the **Licensing** subtab, and then select your authorization system type. @@ -48,9 +48,9 @@ You can use the **Inventory** dashboard in CloudKnox Permissions Management (Clo - The number of **Serverless** licenses. - The number of **Compute containers**. - The number of **Databases**. - - The **Total number of licenses**. + - The **Total number of licenses**. ## Next steps -- For information about viewing and configuring settings for collecting data from your authorization system and its associated accounts, see [View and configure settings for data collection](cloudknox-product-data-sources.md). 
+- For information about viewing and configuring settings for collecting data from your authorization system and its associated accounts, see [View and configure settings for data collection](product-data-sources.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-sources.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-sources.md similarity index 70% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-sources.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-data-sources.md index 2e28b3153498..35fc4609c126 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-data-sources.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-data-sources.md @@ -1,6 +1,6 @@ --- -title: View and configure settings for data collection from your authorization system in CloudKnox Permissions Management -description: How to view and configure settings for collecting data from your authorization system in CloudKnox Permissions Management. +title: View and configure settings for data collection from your authorization system in Permissions Management +description: How to view and configure settings for collecting data from your authorization system in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -12,20 +12,20 @@ ms.date: 02/23/2022 ms.author: kenwith --- -# View and configure settings for data collection +# View and configure settings for data collection > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. 
Microsoft makes no warranties, express or implied, with respect to the information provided here. -You can use the **Data Collectors** dashboard in CloudKnox Permissions Management (CloudKnox) to view and configure settings for collecting data from your authorization systems. It also provides information about the status of the data collection. +You can use the **Data Collectors** dashboard in Permissions Management to view and configure settings for collecting data from your authorization systems. It also provides information about the status of the data collection. ## Access and view data sources -1. To access your data sources, in the CloudKnox home page, select **Settings** (the gear icon). Then select the **Data Collectors** tab. +1. To access your data sources, in the Permissions Management home page, select **Settings** (the gear icon). Then select the **Data Collectors** tab. -1. On the **Data Collectors** dashboard, select your authorization system type: +1. On the **Data Collectors** dashboard, select your authorization system type: - **AWS** for Amazon Web Services. - **Azure** for Microsoft Azure. @@ -48,54 +48,54 @@ You can use the **Data Collectors** dashboard in CloudKnox Permissions Managemen - **ID**: The unique identification number for the data collector. - **Data types**: Displays the data types that are collected: - **Entitlements**: The permissions of all identities and resources for all the configured authorization systems. - - **Recently uploaded on**: Displays whether the entitlement data is being collected. + - **Recently uploaded on**: Displays whether the entitlement data is being collected. The status displays *ONLINE* if the data collection has no errors and *OFFLINE* if there are errors. - **Recently transformed on**: Displays whether the entitlement data is being processed. - The status displays *ONLINE* if the data processing has no errors and *OFFLINE* if there are errors. 
+ The status displays *ONLINE* if the data processing has no errors and *OFFLINE* if there are errors. - The **Tenant ID**. - The **Tenant name**. -## Modify a data collector +## Modify a data collector 1. Select the ellipses **(...)** at the end of the row in the table. -1. Select **Edit Configuration**. +1. Select **Edit Configuration**. - The **CloudKnox Onboarding - Summary** box displays. + The **Permissions Management Onboarding - Summary** box displays. -1. Select **Edit** (the pencil icon) for each field you want to change. +1. Select **Edit** (the pencil icon) for each field you want to change. 1. Select **Verify now & save**. To verify your changes later, select **Save & verify later**. When your changes are saved, the following message displays: **Successfully updated configuration.** - -## Delete a data collector + +## Delete a data collector 1. Select the ellipses **(...)** at the end of the row in the table. -1. Select **Delete Configuration**. +1. Select **Delete Configuration**. - The **CloudKnox Onboarding - Summary** box displays. + The **Permissions Management Onboarding - Summary** box displays. 1. Select **Delete**. -1. Check your email for a one time password (OTP) code, and enter it in **Enter OTP**. +1. Check your email for a one time password (OTP) code, and enter it in **Enter OTP**. If you don't receive an OTP, select **Resend OTP**. The following message displays: **Successfully deleted configuration.** -## Start collecting data from an authorization system +## Start collecting data from an authorization system 1. Select the **Authorization Systems** tab, and then select your authorization system type. 1. Select the ellipses **(...)** at the end of the row in the table. 1. Select **Collect Data**. - A message displays to confirm data collection has started. + A message displays to confirm data collection has started. -## Stop collecting data from an authorization system +## Stop collecting data from an authorization system 1. 
Select the ellipses **(...)** at the end of the row in the table. -1. To delete your authorization system, select **Delete**. +1. To delete your authorization system, select **Delete**. The **Validate OTP To Delete Authorization System** box displays. @@ -104,4 +104,4 @@ You can use the **Data Collectors** dashboard in CloudKnox Permissions Managemen ## Next steps -- For information about viewing an inventory of created resources and licensing information for your authorization system, see [Display an inventory of created resources and licenses for your authorization system](cloudknox-product-data-inventory.md) +- For information about viewing an inventory of created resources and licensing information for your authorization system, see [Display an inventory of created resources and licenses for your authorization system](product-data-inventory.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-define-permission-levels.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-define-permission-levels.md similarity index 85% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-define-permission-levels.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-define-permission-levels.md index d775a826389a..9aeb4875d5cc 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-define-permission-levels.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-define-permission-levels.md @@ -1,6 +1,6 @@ --- -title: Define and manage users, roles, and access levels in CloudKnox Permissions Management -description: How to define and manage users, roles, and access levels in CloudKnox Permissions Management User management dashboard. 
+title: Define and manage users, roles, and access levels in Permissions Management +description: How to define and manage users, roles, and access levels in Permissions Management User management dashboard. services: active-directory author: kenwith manager: rkarlin @@ -15,14 +15,14 @@ ms.author: kenwith # Define and manage users, roles, and access levels > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -In CloudKnox Permissions Management (CloudKnox), a key component of the interface is the User management dashboard. This topic describes how system administrators can define and manage users, their roles, and their access levels in the system. +In Permissions Management, a key component of the interface is the User management dashboard. This topic describes how system administrators can define and manage users, their roles, and their access levels in the system. ## The User management dashboard -The CloudKnox User management dashboard provides a high-level overview of: +The Permissions Management User management dashboard provides a high-level overview of: - Registered and invited users. - Permissions allowed for each user within a given system. @@ -33,58 +33,58 @@ It also provides the functionality to invite or delete a user, edit, view, and c ## Manage users for customers without SAML integration -Follow this process to invite users if the customer hasn't enabled SAML integration with the CloudKnox application. +Follow this process to invite users if the customer hasn't enabled SAML integration with the Permissions Management application. 
-### Invite a user to CloudKnox +### Invite a user to Permissions Management -Inviting a user to CloudKnox adds the user to the system and allows system administrators to assign permissions to those users. Follow the steps below to invite a user to CloudKnox. +Inviting a user to Permissions Management adds the user to the system and allows system administrators to assign permissions to those users. Follow the steps below to invite a user to Permissions Management. -1. To invite a user to CloudKnox, select the down caret icon next to the **User** icon on the right of the screen, and then select **User Management**. +1. To invite a user to Permissions Management, select the down caret icon next to the **User** icon on the right of the screen, and then select **User Management**. 2. From the **Users** tab, select **Invite User**. 3. From the **Set User Permission** window, in the **User** text box, enter the user's email address. -4. Under **Permission**, select the applicable option. +4. Under **Permission**, select the applicable option. - **Admin for All Authorization System Types**: **View**, **Control**, and **Approve** permissions for all Authorization System Types. 1. Select **Next**. - 2. Select **Requestor for User** for each authorization system, if applicable. + 2. Select **Requestor for User** for each authorization system, if applicable. A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - 3. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + 3. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. 
- For example, a user may have various roles in different authorization systems, so they can select the **Add** icon and the **Users** icon to request access for all their accounts. + For example, a user may have various roles in different authorization systems, so they can select the **Add** icon and the **Users** icon to request access for all their accounts. 4. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - + - **Admin for Selected Authorization System Types**: **View**, **Control**, and **Approve** permissions for selected Authorization System Types. - - 1. Select **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). + + 1. Select **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). 2. Select **Next**. - 3. Select **Requestor for User** for each authorization system, if applicable. - + 3. Select **Requestor for User** for each authorization system, if applicable. + A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - 4. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + 4. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. 5. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - + - **Custom**: **View**, **Control**, and **Approve** permissions for specific accounts in **Auth System Types**. - + 1. Select **Next**. 
The default view displays the **List** section. - 2. Select the appropriate boxes for **Viewer**, **Controller**, or **Approver**. + 2. Select the appropriate boxes for **Viewer**, **Controller**, or **Approver**. - For access to all authorization system types, select **All (Current and Future)**. + For access to all authorization system types, select **All (Current and Future)**. 1. Select **Next**. - 1. Select **Requestor for User** for each authorization system, if applicable. + 1. Select **Requestor for User** for each authorization system, if applicable. A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. 6. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. -5. Select **Save**. +5. Select **Save**. The following message displays in green at the top of the screen: **New User Has Been Invited Successfully**. @@ -92,18 +92,18 @@ Inviting a user to CloudKnox adds the user to the system and allows system admin ## Manage users for customers with SAML integration -Follow this process to invite users if the customer has enabled SAML integration with the CloudKnox application. +Follow this process to invite users if the customer has enabled SAML integration with the Permissions Management application. 
-### Create a permission in CloudKnox +### Create a permission in Permissions Management -Creating a permission directly in CloudKnox allows system administrators to assign permissions to specific users. The following steps help you to create a permission. +Creating a permission directly in Permissions Management allows system administrators to assign permissions to specific users. The following steps help you to create a permission. - On the right side of the screen, select the down caret icon next to **User**, and then select **User management**. - For **Users**: 1. To create permissions for a specific user, select the **Users** tab, and then select **Permission.** 2. From the **Set User Permission** window, enter the user's email address in the **User** text box. - 3. Under **Permission**, select the applicable button. Then expand menu to view instructions for each option. + 3. Under **Permission**, select the applicable button. Then expand menu to view instructions for each option. - **Admin for All Authorization System Types**: **View**, **Control**, and **Approve** permissions for all Authorization System Types. 1. Select **Next**. 2. Check **Requestor for User** for each authorization system, if applicable. @@ -112,12 +112,12 @@ Creating a permission directly in CloudKnox allows system administrators to assi 3. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. 4. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. 
- **Admin for Selected Authorization System Types**: **View**, **Control**, and **Approve** permissions for selected Authorization System Types. - 1. Check **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). + 1. Check **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). 2. Select **Next**. 3. Check **Requestor for User** for each authorization system, if applicable. @@ -142,15 +142,15 @@ Creating a permission directly in CloudKnox allows system administrators to assi A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - For example, a user can have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + For example, a user can have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. 6. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - 4. Select **Save**. - + 4. Select **Save**. + The following message displays in green at the top of the screen: - **New User Has Been Created Successfully**. - 5. The new user receives an email invitation to log in to CloudKnox. + **New User Has Been Created Successfully**. + 5. The new user receives an email invitation to log in to Permissions Management. 
### The Pending tab @@ -165,7 +165,7 @@ Creating a permission directly in CloudKnox allows system administrators to assi - **Delete**: System administrators can delete a permission - **Reinvite**: System administrator can reinvite the permission if the user didn't receive the email invite - When a user registers with CloudKnox, they move from the **Pending** tab to the **Registered** tab. + When a user registers with Permissions Management, they move from the **Pending** tab to the **Registered** tab. ### The Registered tab @@ -176,7 +176,7 @@ Creating a permission directly in CloudKnox allows system administrators to assi - The **Permissions** column lists each authorization system, and each type of permission. If a user has all permissions for all authorization systems, **Admin for All Authorization Types** display across all columns. If a user only has some permissions, numbers display in each column they have permissions for. For example, if the number "3" is listed in the **Viewer** column, the user has viewer permission for three accounts within that authorization system. - - The **Joined On** column records when the user registered for CloudKnox. + - The **Joined On** column records when the user registered for Permissions Management. - The **Recent Activity** column displays the date when a user last performed an activity. - The **Search** button allows a system administrator to search for a user by name and all users who match the criteria displays. - The **Filters** option allows a system administrator to filter by specific details. When the filter option is selected, the **Authorization System** box displays. @@ -194,7 +194,7 @@ Creating a permission directly in CloudKnox allows system administrators to assi The identity provider creates groups. Some users may be part of multiple groups. In this case, the user's overall permissions is a union of the permissions assigned the various groups the user is a member of. - 3. 
Under **Permission**, select the applicable button and expand the menu to view instructions for each option. + 3. Under **Permission**, select the applicable button and expand the menu to view instructions for each option. - **Admin for All Authorization System Types**: **View**, **Control**, and **Approve** permissions for all Authorization System Types. 1. Select **Next**. @@ -208,48 +208,48 @@ Creating a permission directly in CloudKnox allows system administrators to assi 4. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - **Admin for Selected Authorization System Types**: **View**, **Control**, and **Approve** permissions for selected Authorization System Types. - 1. Check **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). + 1. Check **Viewer**, **Controller**, or **Approver** for the appropriate authorization system(s). 2. Select **Next**. 3. Check **Requestor for User** for each authorization system, if applicable. A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. 4. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. 5. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. 
- + - **Custom**: **View**, **Control**, and **Approve** permissions for specific accounts in Auth System Types. - 1. Select **Next**. + 1. Select **Next**. The default view displays the **List** section. - 2. Check the appropriate boxes for **Viewer**, **Controller**, or **Approver. + 2. Check the appropriate boxes for **Viewer**, **Controller**, or **Approver. For access to all authorization system types, select **All (Current and Future)**. 3. Select **Next**. - 4. Check **Requestor for User** for each authorization system, if applicable. + 4. Check **Requestor for User** for each authorization system, if applicable. A user must have an account with a valid email address in the authorization system to select **Requestor for User**. If a user doesn't exist in the authorization system, **Requestor for User** is grayed out. - 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. + 5. Optional: To request access for multiple other identities, under **Requestor for Other Users**, select **Add**, and then select **Users**. - For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. + For example, a user may have various roles in different authorization systems, so they can select **Add**, and then select **Users** to request access for all their accounts. 6. On the **Add Users** screen, enter the user's name or ID in the **User Search** box and select all applicable users. Then select **Add**. - 4. Select **Save**. - - The following message displays in green at the top of the screen: **New Group Has Been Created Successfully**. + 4. Select **Save**. + + The following message displays in green at the top of the screen: **New Group Has Been Created Successfully**. ### The Groups tab 1. 
The **Groups** tab provides a high-level overview of user details to system administrators: - + - The **Name** column lists the name of the group. - - The **Permissions** column lists each authorization system, and each type of permission. + - The **Permissions** column lists each authorization system, and each type of permission. If a group has all permissions for all authorization systems, **Admin for All Authorization Types** displays across all columns. @@ -262,7 +262,7 @@ Creating a permission directly in CloudKnox allows system administrators to assi - The **Filters** option allows a system administrator to filter by specific details. When the filter option is selected, the **Authorization System** box displays. To display all authorization system accounts, select **All**. Then select the appropriate boxes for the accounts that need to be viewed. - + 2. To make changes to the following, select the ellipses **(...)** in the far right column: - **View Permissions**: Displays a list of the accounts for which the group has permissions. - **Edit Permissions**: System administrators can edit a group's permissions. @@ -272,6 +272,5 @@ Creating a permission directly in CloudKnox allows system administrators to assi ## Next steps -- For information about how to view user management information, see [Manage users with the User management dashboard](cloudknox-ui-user-management.md). -- For information about how to create group-based permissions, see [Create group-based permissions](cloudknox-howto-create-group-based-permissions.md). - +- For information about how to view user management information, see [Manage users with the User management dashboard](ui-user-management.md). +- For information about how to create group-based permissions, see [Create group-based permissions](how-to-create-group-based-permissions.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-integrations.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-integrations.md similarity index 100% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-integrations.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-integrations.md diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permission-analytics.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-permission-analytics.md similarity index 83% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permission-analytics.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-permission-analytics.md index 479f73496ba0..6c51f7ca8c57 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permission-analytics.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-permission-analytics.md @@ -1,6 +1,6 @@ --- -title: Create and view permission analytics triggers in CloudKnox Permissions Management -description: How to create and view permission analytics triggers in the Permission analytics tab in CloudKnox Permissions Management. +title: Create and view permission analytics triggers in Permissions Management +description: How to create and view permission analytics triggers in the Permission analytics tab in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -12,24 +12,24 @@ ms.date: 02/23/2022 ms.author: kenwith --- -# Create and view permission analytics triggers +# Create and view permission analytics triggers > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. 
+> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how you can create and view permission analytics triggers in CloudKnox Permissions Management (CloudKnox). +This article describes how you can create and view permission analytics triggers in Permissions Management. ## View permission analytics triggers -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. Select **Permission Analytics**, and then select the **Alerts** subtab. The **Alerts** subtab displays the following information: - **Alert Name**: Lists the name of the alert. - - To view the name, ID, role, domain, authorization system, statistical condition, anomaly date, and observance period, select **Alert name**. - - To expand the top information found with a graph of when the anomaly occurred, select **Details**. + - To view the name, ID, role, domain, authorization system, statistical condition, anomaly date, and observance period, select **Alert name**. + - To expand the top information found with a graph of when the anomaly occurred, select **Details**. - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. - **# of Occurrences**: Displays how many times the alert trigger has occurred. - **Task**: Displays how many tasks are affected by the alert @@ -39,7 +39,7 @@ This article describes how you can create and view permission analytics triggers - **Date/Time**: Displays the date and time of the alert. - **Date/Time (UTC)**: Lists the date and time of the alert in Coordinated Universal Time (UTC). -1. To filter the alerts, select the appropriate alert name or, from the **Alert Name** menu,select **All**. +1. 
To filter the alerts, select the appropriate alert name or, from the **Alert Name** menu,select **All**. - From the **Date** dropdown menu, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range**, and then select **Apply**. @@ -48,30 +48,30 @@ This article describes how you can create and view permission analytics triggers 1. To view the following details, select the ellipses (**...**): - **Details**: Displays **Authorization System Type**, **Authorization Systems**, **Resources**, **Tasks**, and **Identities** that matched the alert criteria. -1. To view specific matches, select **Resources**, **Tasks**, or **Identities**. +1. To view specific matches, select **Resources**, **Tasks**, or **Identities**. The **Activity** section displays details about the **Identity Name**, **Resource Name**, **Task Name**, **Date**, and **IP Address**. ## Create a permission analytics trigger -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. Select **Permission Analytics**, select the **Alerts** subtab, and then select **Create Permission Analytics Trigger**. 1. In the **Alert Name** box, enter a name for the alert. 1. Select the **Authorization System**. 1. Select **Identity performed high number of tasks**, and then select **Next**. -1. On the **Authorization Systems** tab, select the appropriate accounts and folders, or select **All**. +1. On the **Authorization Systems** tab, select the appropriate accounts and folders, or select **All**. This screen defaults to the **List** view but can also be changed to the **Folder** view, and the applicable folder can be selected instead of individually by system. - The **Status** column displays if the authorization system is online or offline - - The **Controller** column displays if the controller is enabled or disabled. 
+ - The **Controller** column displays if the controller is enabled or disabled. 1. On the **Configuration** tab, to update the **Time Interval**, select **90 Days**, **60 Days**, or **30 Days** from the **Time range** dropdown. 1. Select **Save**. ## View permission analytics alert triggers -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. Select **Permission Analytics**, and then select the **Alert Triggers** subtab. The **Alert triggers** subtab displays the following information: @@ -96,7 +96,7 @@ This article describes how you can create and view permission analytics triggers ## Next steps -- For an overview on activity triggers, see [View information about activity triggers](cloudknox-ui-triggers.md). -- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](cloudknox-howto-create-alert-trigger.md). -- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](cloudknox-product-rule-based-anomalies.md). -- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](cloudknox-product-statistical-anomalies.md). +- For an overview on activity triggers, see [View information about activity triggers](ui-triggers.md). +- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](how-to-create-alert-trigger.md). +- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](product-rule-based-anomalies.md). +- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](product-statistical-anomalies.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permissions-analytics-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-permissions-analytics-reports.md similarity index 50% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permissions-analytics-reports.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-permissions-analytics-reports.md index 7f2acdd173c0..21a8dbd44a40 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-permissions-analytics-reports.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-permissions-analytics-reports.md @@ -1,5 +1,5 @@ --- -title: Generate and download the Permissions analytics report in CloudKnox Permissions Management +title: Generate and download the Permissions analytics report in CloudKnox Permissions Management description: How to generate and download the Permissions analytics report in CloudKnox Permissions Management. services: active-directory author: kenwith @@ -23,7 +23,7 @@ This article describes how to generate and download the **Permissions analytics > [!NOTE] > This topic applies only to Amazon Web Services (AWS) users. -## Generate the Permissions analytics report +## Generate the Permissions analytics report 1. In the CloudKnox home page, select the **Reports** tab, and then select the **Systems Reports** subtab. @@ -34,71 +34,71 @@ This article describes how to generate and download the **Permissions analytics 1. For detailed information in the report, select the right arrow next to one of the following categories. Or, select the required category under the **Findings** column. 
- - **AWS** - - Inactive Identities - - Users - - Roles - - Resources - - Serverless Functions - - Inactive Groups - - Super Identities - - Users - - Roles - - Resources - - Serverless Functions - - Over-Provisioned Active Identities - - Users - - Roles - - Resources - - Serverless Functions - - PCI Distribution - - Privilege Escalation - - Users - - Roles - - Resources - - S3 Bucket Encryption - - Unencrypted Buckets - - SSE-S3 Buckets - - S3 Buckets Accessible Externally - - EC2 S3 Buckets Accessibility - - Open Security Groups - - Identities That Can Administer Security Tools - - Users - - Roles - - Resources - - Serverless Functions - - Identities That Can Access Secret Information - - Users - - Roles - - Resources - - Serverless Functions - - Cross-Account Access - - External Accounts - - Roles That Allow All Identities - - Hygiene: MFA Enforcement - - Hygiene: IAM Access Key Age - - Hygiene: Unused IAM Access Keys - - Exclude From Reports - - Users - - Roles - - Resources - - Serverless Functions - - Groups - - Security Groups - - S3 Buckets + - **AWS** + - Inactive Identities + - Users + - Roles + - Resources + - Serverless Functions + - Inactive Groups + - Super Identities + - Users + - Roles + - Resources + - Serverless Functions + - Over-Provisioned Active Identities + - Users + - Roles + - Resources + - Serverless Functions + - PCI Distribution + - Privilege Escalation + - Users + - Roles + - Resources + - S3 Bucket Encryption + - Unencrypted Buckets + - SSE-S3 Buckets + - S3 Buckets Accessible Externally + - EC2 S3 Buckets Accessibility + - Open Security Groups + - Identities That Can Administer Security Tools + - Users + - Roles + - Resources + - Serverless Functions + - Identities That Can Access Secret Information + - Users + - Roles + - Resources + - Serverless Functions + - Cross-Account Access + - External Accounts + - Roles That Allow All Identities + - Hygiene: MFA Enforcement + - Hygiene: IAM Access Key Age + - Hygiene: Unused IAM Access Keys + 
- Exclude From Reports + - Users + - Roles + - Resources + - Serverless Functions + - Groups + - Security Groups + - S3 Buckets 1. Select a category and view the following columns of information: - - **User**, **Role**, **Resource**, **Serverless Function Name**: Displays the name of the identity. - - **Authorization System**: Displays the authorization system to which the identity belongs. - - **Domain**: Displays the domain name to which the identity belongs. - - **Permissions**: Displays the maximum number of permissions that the identity can be granted. - - **Used**: Displays how many permissions that the identity has used. - - **Granted**: Displays how many permissions that the identity has been granted. - - **PCI**: Displays the permission creep index (PCI) score of the identity. - - **Date Last Active On**: Displays the date that the identity was last active. - - **Date Created On**: Displays the date when the identity was created. + - **User**, **Role**, **Resource**, **Serverless Function Name**: Displays the name of the identity. + - **Authorization System**: Displays the authorization system to which the identity belongs. + - **Domain**: Displays the domain name to which the identity belongs. + - **Permissions**: Displays the maximum number of permissions that the identity can be granted. + - **Used**: Displays how many permissions that the identity has used. + - **Granted**: Displays how many permissions that the identity has been granted. + - **PCI**: Displays the permission creep index (PCI) score of the identity. + - **Date Last Active On**: Displays the date that the identity was last active. + - **Date Created On**: Displays the date when the identity was created. @@ -108,7 +108,7 @@ This article describes how to generate and download the **Permissions analytics 1. Select one of the categories from the **Permissions Analytics Report**. 1. Select the identity name to which you want to add a tag. 
Then, select the checkbox at the top to select all identities. 1. Select **Add Tag**. -1. In the **Tag** column: +1. In the **Tag** column: - To select from the available options from the list, select **Select a Tag**. - To search for a tag, enter the tag name. - To create a new custom tag, select **New Custom Tag**. @@ -117,10 +117,10 @@ This article describes how to generate and download the **Permissions analytics 1. In the **Value (optional)** box, enter a value, if necessary. 1. Select **Save**.---> - + ## Next steps -- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](cloudknox-product-reports.md). -- For a detailed overview of available system reports, see [View a list and description of system reports](cloudknox-all-reports.md). -- For information about how to generate and view a system report, see [Generate and view a system report](cloudknox-report-view-system-report.md). -- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](cloudknox-report-view-system-report.md). +- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](product-reports.md). +- For a detailed overview of available system reports, see [View a list and description of system reports](all-reports.md). +- For information about how to generate and view a system report, see [Generate and view a system report](report-view-system-report.md). +- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](report-view-system-report.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/product-reports.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-reports.md new file mode 100644 index 000000000000..5539d0460aad --- /dev/null +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-reports.md @@ -0,0 +1,141 @@ +--- +title: View system reports in the Reports dashboard in CloudKnox Permissions Management +description: How to view system reports in the Reports dashboard in CloudKnox Permissions Management. +services: active-directory +author: kenwith +manager: rkarlin +ms.service: active-directory +ms.subservice: ciem +ms.workload: identity +ms.topic: how-to +ms.date: 02/23/2022 +ms.author: kenwith +--- + +# View system reports in the Reports dashboard + +> [!IMPORTANT] +> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. + +CloudKnox Permissions Management (CloudKnox) has various types of system report types available that capture specific sets of data. These reports allow management to: + +- Make timely decisions. +- Analyze trends and system/user performance. +- Identify trends in data and high risk areas so that management can address issues more quickly and improve their efficiency. + +## Explore the Reports dashboard + +The **Reports** dashboard provides a table of information with both system reports and custom reports. The **Reports** dashboard defaults to the **System Reports** tab, which has the following details: + +- **Report Name**: The name of the report. +- **Category**: The type of report. For example, **Permission**. +- **Authorization Systems**: Displays which authorizations the custom report applies to. 
+- **Format**: Displays the output format the report can be generated in. For example, comma-separated values (CSV) format, portable document format (PDF), or Microsoft Excel Open XML Spreadsheet (XLSX) format. + + - To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. + + The following message displays across the top of the screen in green if the download is successful: **Successfully Started To Generate On Demand Report**. + +## Available system reports + +CloudKnox offers the following reports for management associated with the authorization systems noted in parentheses: + +- **Access Key Entitlements And Usage**: + - **Summary of report**: Provides information about access keys, for example, permissions, usage, and rotation date. + - **Applies to**: Amazon Web Services (AWS) and Microsoft Azure + - **Report output type**: CSV + - **Ability to collate report**: Yes + - **Type of report**: **Summary** or **Detailed** + - **Use cases**: + - The access key age, last rotation date, and last usage date are available in the summary report to help with key rotation. + - The granted task and Permissions creep index (PCI) score to take action on the keys. + +- **User Entitlements And Usage**: + - **Summary of report**: Provides information about the identities' permissions, for example, entitlement, usage, and PCI. + - **Applies to**: AWS, Azure, and Google Cloud Platform (GCP) + - **Report output type**: CSV + - **Ability to collate report**: Yes + - **Type of report**: **Summary** or **Detailed** + - **Use cases**: + - The data displayed on the **Usage Analytics** screen is downloaded as part of the **Summary** report. The user's detailed permissions usage is listed in the **Detailed** report. + +- **Group Entitlements And Usage**: + - **Summary of report**: Provides information about the group's permissions, for example, entitlement, usage, and PCI. 
+ - **Applies to**: AWS, Azure, and GCP + - **Report output type**: CSV + - **Ability to collate report**: Yes + - **Type of report**: **Summary** + - **Use cases**: + - All group level entitlements and permission assignments, PCIs, and the number of members are listed as part of this report. + +- **Identity Permissions**: + - **Summary of report**: Report on identities that have specific permissions, for example, identities that have permission to delete any S3 buckets. + - **Applies to**: AWS, Azure, and GCP + - **Report output type**: CSV + - **Ability to collate report**: No + - **Type of report**: **Summary** + - **Use cases**: + - Any task usage or specific task usage via User/Group/Role/App can be tracked with this report. + +- **Identity privilege activity report** + - **Summary of report**: Provides information about permission changes that have occurred in the selected duration. + - **Applies to**: AWS, Azure, and GCP + - **Report output type**: PDF + - **Ability to collate report**: No + - **Type of report**: **Summary** + - **Use cases**: + - Any identity permission change can be captured using this report. + - The **Identity Privilege Activity** report has the following main sections: **User Summary**, **Group Summary**, **Role Summary**, and **Delete Task Summary**. + - The **User** summary lists the current granted permissions and high-risk permissions and resources accessed in 1 day, 7 days, or 30 days. There are subsections for newly added or deleted users, users with PCI change, and High-risk active/inactive users. + - The **Group** summary lists the administrator level groups with the current granted permissions and high-risk permissions and resources accessed in 1 day, 7 days, or 30 days. There are subsections for newly added or deleted groups, groups with PCI change, and High-risk active/inactive groups. + - The **Role summary** lists similar details as **Group Summary**. 
+ - The **Delete Task summary** section lists the number of times the **Delete task** has been executed in the given time period. + +- **Permissions Analytics Report** + - **Summary of report**: Provides information about the violation of key security best practices. + - **Applies to**: AWS, Azure, and GCP + - **Report output type**: CSV + - **Ability to collate report**: Yes + - **Type of report**: **Detailed** + - **Use cases**: + - This report lists the different key findings in the selected auth systems. The key findings include super identities, inactive identities, over provisioned active identities, storage bucket hygiene, and access key age (for AWS only). The report helps administrators to visualize the findings across the organization. + + For more information about this report, see [Permissions analytics report](product-permissions-analytics-reports.md). + +- **Role/Policy Details** + - **Summary of report**: Provides information about roles and policies. + - **Applies to**: AWS, Azure, GCP + - **Report output type**: CSV + - **Ability to collate report**: No + - **Type of report**: **Summary** + - **Use cases**: + - Assigned/Unassigned, custom/system policy, and the used/unused condition is captured in this report for any specific, or all, AWS accounts. Similar data can be captured for Azure/GCP for the assigned/unassigned roles. + +- **PCI History** + - **Summary of report**: Provides a report of privilege creep index (PCI) history. + - **Applies to**: AWS, Azure, GCP + - **Report output type**: CSV + - **Ability to collate report**: Yes + - **Type of report**: **Summary** + - **Use cases**: + - This report plots the trend of the PCI by displaying the monthly PCI history for each authorization system. + +- **All Permissions for Identity** + - **Summary of report**: Provides results of all permissions for identities. 
+ - **Applies to**: AWS, Azure, GCP + - **Report output type**: CSV + - **Ability to collate report**: Yes + - **Type of report**: **Detailed** + - **Use cases**: + - This report lists all the assigned permissions for the selected identities. + + + + +## Next steps + +- For a detailed overview of available system reports, see [View a list and description of system reports](all-reports.md). +- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](report-view-system-report.md). +- For information about how to create and view a custom report, see [Generate and view a custom report](report-create-custom-report.md). +- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-rule-based-anomalies.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-rule-based-anomalies.md similarity index 82% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-rule-based-anomalies.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-rule-based-anomalies.md index f9af667bb858..2d014ae108e0 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-rule-based-anomalies.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-rule-based-anomalies.md @@ -1,6 +1,6 @@ --- -title: Create and view rule-based anomalies and anomaly triggers in CloudKnox Permissions Management -description: How to create and view rule-based anomalies and anomaly triggers in CloudKnox Permissions Management. 
+title: Create and view rule-based anomalies and anomaly triggers in Permissions Management +description: How to create and view rule-based anomalies and anomaly triggers in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -12,23 +12,23 @@ ms.date: 02/23/2022 ms.author: kenwith --- -# Create and view rule-based anomaly alerts and anomaly triggers +# Create and view rule-based anomaly alerts and anomaly triggers > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -Rule-based anomalies identify recent activity in CloudKnox Permissions Management (CloudKnox) that is determined to be unusual based on explicit rules defined in the activity trigger. The goal of rule-based anomaly is high precision detection. +Rule-based anomalies identify recent activity in Permissions Management that is determined to be unusual based on explicit rules defined in the activity trigger. The goal of rule-based anomaly is high precision detection. ## View rule-based anomaly alerts -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. Select **Rule-Based Anomaly**, and then select the **Alerts** subtab. The **Alerts** subtab displays the following information: - **Alert Name**: Lists the name of the alert. - + - To view the specific identity, resource, and task names that occurred during the alert collection period, select the **Alert Name**. - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. 
@@ -36,18 +36,18 @@ Rule-based anomalies identify recent activity in CloudKnox Permissions Managemen - **Task**: How many tasks performed are triggered by the alert. - **Resources**: How many resources accessed are triggered by the alert. - **Identity**: How many identities performing unusual behavior are triggered by the alert. - - **Authorization System**: Displays which authorization systems the alert applies to, Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). + - **Authorization System**: Displays which authorization systems the alert applies to, Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). - **Date/Time**: Lists the date and time of the alert. - **Date/Time (UTC)**: Lists the date and time of the alert in Coordinated Universal Time (UTC). - + 1. To filter alerts: - - From the **Alert Name** dropdown, select **All** or the appropriate alert name. + - From the **Alert Name** dropdown, select **All** or the appropriate alert name. - From the **Date** dropdown menu, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range**, and select **Apply**. - If you select **Custom Range**, also enter **From** and **To** duration settings. -1. To view details that match the alert criteria, select the ellipses (**...**). +1. To view details that match the alert criteria, select the ellipses (**...**). - **View Trigger**: Displays the current trigger settings and applicable authorization system details - **Details**: Displays details about **Authorization System Type**, **Authorization Systems**, **Resources**, **Tasks**, **Identities**, and **Activity** @@ -55,7 +55,7 @@ Rule-based anomalies identify recent activity in CloudKnox Permissions Managemen ## Create a rule-based anomaly trigger -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. 
Select **Rule-Based Anomaly**, and then select the **Alerts** subtab. 1. Select **Create Anomaly Trigger**. @@ -66,11 +66,11 @@ Rule-based anomalies identify recent activity in CloudKnox Permissions Managemen - **Identity Performs a Particular Task for the First Time**: The identity does a specific task for the first time during the specified time interval. - **Identity Performs a Task for the First Time**: The identity performs any task for the first time during the specified time interval 1. Select **Next**. -1. On the **Authorization Systems** tab, select the available authorization systems and folders, or select **All**. +1. On the **Authorization Systems** tab, select the available authorization systems and folders, or select **All**. - This screen defaults to **List** view, but you can change it to **Folders** view. You can select the applicable folder instead of individually selecting by authorization system. + This screen defaults to **List** view, but you can change it to **Folders** view. You can select the applicable folder instead of individually selecting by authorization system. - - The **Status** column displays if the authorization system is online or offline. + - The **Status** column displays if the authorization system is online or offline. - The **Controller** column displays if the controller is enabled or disabled. 1. On the **Configuration** tab, to update the **Time Interval**, select **90 Days**, **60 Days**, or **30 Days** from the **Time range** dropdown. @@ -78,9 +78,9 @@ Rule-based anomalies identify recent activity in CloudKnox Permissions Managemen ## View a rule-based anomaly trigger -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. Select **Rule-Based Anomaly**, and then select the **Alert Triggers** subtab. 
- + The **Alert Triggers** subtab displays the following information: - **Alerts**: Displays the name of the alert. @@ -89,13 +89,13 @@ Rule-based anomalies identify recent activity in CloudKnox Permissions Managemen - **Created By**: Displays the email address of the user who created the alert. - **Last Modified By**: Displays the email address of the user who last modified the alert. - **Last Modified On**: Displays the date and time the trigger was last modified. - - **Subscription**: Subscribes you to receive alert emails. Switches between **On** and **Off**. + - **Subscription**: Subscribes you to receive alert emails. Switches between **On** and **Off**. 1. To view other options available to you, select the ellipses (**...**), and then select from the available options: If the **Subscription** is **On**, the following options are available: - - **Edit**: Enables you to modify alert parameters. + - **Edit**: Enables you to modify alert parameters. Only the user who created the alert can edit the trigger screen, rename an alert, deactivate an alert, and delete an alert. Changes made by other users aren't saved. @@ -103,7 +103,7 @@ Rule-based anomalies identify recent activity in CloudKnox Permissions Managemen - **Rename**: Enter the new name of the query, and then select **Save.** - **Deactivate**: The alert will still be listed, but will no longer send emails to subscribed users. - **Activate**: Activate the alert trigger and start sending emails to subscribed users. - - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger. + - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger. - **Delete**: Delete the alert. 
If the **Subscription** is **Off**, the following options are available: @@ -117,7 +117,7 @@ Rule-based anomalies identify recent activity in CloudKnox Permissions Managemen ## Next steps -- For an overview on activity triggers, see [View information about activity triggers](cloudknox-ui-triggers.md). -- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](cloudknox-howto-create-alert-trigger.md). -- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](cloudknox-product-statistical-anomalies.md). -- For information on permission analytics triggers, see [Create and view permission analytics triggers](cloudknox-product-permission-analytics.md). +- For an overview on activity triggers, see [View information about activity triggers](ui-triggers.md). +- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](how-to-create-alert-trigger.md). +- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](product-statistical-anomalies.md). +- For information on permission analytics triggers, see [Create and view permission analytics triggers](product-permission-analytics.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-statistical-anomalies.md b/articles/active-directory/cloud-infrastructure-entitlement-management/product-statistical-anomalies.md similarity index 88% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-statistical-anomalies.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/product-statistical-anomalies.md index ebddfd89f42d..bcef698e31a8 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-product-statistical-anomalies.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/product-statistical-anomalies.md @@ -1,6 +1,6 @@ --- -title: Create and view statistical anomalies and anomaly triggers in CloudKnox Permissions Management -description: How to create and view statistical anomalies and anomaly triggers in the Statistical Anomaly tab in CloudKnox Permissions Management. +title: Create and view statistical anomalies and anomaly triggers in Permissions Management +description: How to create and view statistical anomalies and anomaly triggers in the Statistical Anomaly tab in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,25 +15,25 @@ ms.author: kenwith # Create and view statistical anomalies and anomaly triggers > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. Statistical anomalies can detect outliers in an identity's behavior if recent activity is determined to be unusual based on models defined in an activity trigger. The goal of this anomaly trigger is a high recall rate. 
## View statistical anomalies in an identity's behavior -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. Select **Statistical Anomaly**, and then select the **Alerts** subtab. The **Alerts** subtab displays the following information: - **Alert Name**: Lists the name of the alert. - - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. + - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. - **# of Occurrences**: Displays how many times the alert trigger has occurred. - **Authorization System**: Displays which authorization systems the alert applies to. - **Date/Time**: Lists the day of the outlier occurring. - **Date/Time (UTC)**: Lists the day of the outlier occurring in Coordinated Universal Time (UTC). - + 1. To filter the alerts based on name, select the appropriate alert name or choose **All** from the **Alert Name** dropdown menu, and select **Apply**. 1. To filter the alerts based on alert time, select **Last 24 Hours**, **Last 2 Days**, **Last Week**, or **Custom Range** from the **Date** dropdown menu, and select **Apply**. @@ -41,11 +41,11 @@ Statistical anomalies can detect outliers in an identity's behavior if recent ac - **Details**, this brings you to an Alert Summary view with **Authorization System**, **Statistical Model** and **Observance Period** displayed along with a table with a row per identity triggering this alert. 
From here you can click: - **Details**: Displays graph(s) highlighting the anomaly with context, and up to the top 3 actions performed on the day of the anomaly - **View Trigger**: Displays the current trigger settings and applicable authorization system details - - **View Trigger**: Displays the current trigger settings and applicable authorization system details + - **View Trigger**: Displays the current trigger settings and applicable authorization system details ## Create a statistical anomaly trigger -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. Select **Statistical Anomaly**, select the **Alerts** subtab, and then select **Create Alert Trigger**. 1. Enter a name for the alert in the **Alert Name** box. 1. Select the **Authorization System**, Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). @@ -65,11 +65,11 @@ Statistical anomalies can detect outliers in an identity's behavior if recent ac - **Identity Performed Tasks with Multiple Unusual Patterns**: The identity has several unusual patterns in the tasks performed by the identity as established by their baseline in the observance period. 1. Select **Next**. -1. On the **Authorization Systems** tab, select the appropriate systems, or, to select all systems, select **All**. +1. On the **Authorization Systems** tab, select the appropriate systems, or, to select all systems, select **All**. - The screen defaults to the **List** view but you can switch to **Folder** view using the menu, and then select the applicable folder instead of individually by system. + The screen defaults to the **List** view but you can switch to **Folder** view using the menu, and then select the applicable folder instead of individually by system. - - The **Status** column displays if the authorization system is online or offline. 
+ - The **Status** column displays if the authorization system is online or offline. - The **Controller** column displays if the controller is enabled or disabled. @@ -78,13 +78,13 @@ Statistical anomalies can detect outliers in an identity's behavior if recent ac ## View statistical anomaly triggers -1. In the CloudKnox home page, select **Activity triggers** (the bell icon). +1. In the Permissions Management home page, select **Activity triggers** (the bell icon). 1. Select **Statistical Anomaly**, and then select the **Alert Triggers** subtab. The **Alert Triggers** subtab displays the following information: - **Alert**: Displays the name of the alert. - - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. + - **Anomaly Alert Rule**: Displays the name of the rule select when creating the alert. - **# of users subscribed**: Displays the number of users subscribed to the alert. - **Created By**: Displays the email address of the user who created the alert. - **Last Modified By**: Displays the email address of the user who last modified the alert. @@ -96,7 +96,7 @@ Statistical anomalies can detect outliers in an identity's behavior if recent ac 1. To view other options available to you, select the ellipses (**...**), and then select from the available options: If the **Subscription** is **On**, the following options are available: - - **Edit**: Enables you to modify alert parameters + - **Edit**: Enables you to modify alert parameters > [!NOTE] > Only the user who created the alert can perform the following actions: edit the trigger screen, rename an alert, deactivate an alert, and delete an alert. Changes made by other users aren't saved. @@ -106,12 +106,12 @@ Statistical anomalies can detect outliers in an identity's behavior if recent ac - **Activate**: Activate the alert trigger and start sending emails to subscribed users. - **Notification Settings**: View the **Email** of users who are subscribed to the alert trigger. 
- **Delete**: Delete the alert. - + If the **Subscription** is **Off**, the following options are available: - **View**: View details of the alert trigger. - **Notification settings**: View the **Email** of users who are subscribed to the alert trigger. - **Duplicate**: Create a duplicate copy of the selected alert trigger. - + 1. Select **Apply**. @@ -119,7 +119,7 @@ Statistical anomalies can detect outliers in an identity's behavior if recent ac ## Next steps -- For an overview on activity triggers, see [View information about activity triggers](cloudknox-ui-triggers.md). -- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](cloudknox-howto-create-alert-trigger.md). -- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](cloudknox-product-rule-based-anomalies.md). -- For information on permission analytics triggers, see [Create and view permission analytics triggers](cloudknox-product-permission-analytics.md). +- For an overview on activity triggers, see [View information about activity triggers](ui-triggers.md). +- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](how-to-create-alert-trigger.md). +- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](product-rule-based-anomalies.md). +- For information on permission analytics triggers, see [Create and view permission analytics triggers](product-permission-analytics.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-create-custom-report.md b/articles/active-directory/cloud-infrastructure-entitlement-management/report-create-custom-report.md similarity index 63% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-create-custom-report.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/report-create-custom-report.md index 2f7d8b0c51ff..203365d8a884 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-create-custom-report.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/report-create-custom-report.md @@ -1,6 +1,6 @@ --- -title: Create, view, and share a custom report a custom report in CloudKnox Permissions Management -description: How to create, view, and share a custom report in the CloudKnox Permissions Management. +title: Create, view, and share a custom report in Permissions Management +description: How to create, view, and share a custom report in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,14 +15,14 @@ ms.author: kenwith # Create, view, and share a custom report > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how to create, view, and share a custom report in CloudKnox Permissions Management (CloudKnox). +This article describes how to create, view, and share a custom report in Permissions Management. -## Create a custom report +## Create a custom report -1. 
In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab. 1. Select **New Custom Report**. 1. In the **Report Name** box, enter a name for your report. 1. From the **Report Based on** list: @@ -37,7 +37,7 @@ This article describes how to create, view, and share a custom report in CloudKn 1. Select the **Report Format** subtab, and then select the format for your report: comma-separated values (**CSV**) file, portable document format (**PDF**), or Microsoft Excel Open XML Spreadsheet (**XLSX**) file. 1. Select the **Schedule** tab, and then select the frequency for your report, from **None** up to **Monthly**. - - For **Hourly** and **Daily** options, set the start date by choosing from the **Calendar** dropdown, and can input a specific time of the day they want to receive the report. + - For **Hourly** and **Daily** options, set the start date by choosing from the **Calendar** dropdown, and can input a specific time of the day they want to receive the report. In addition to date and time, the **Weekly** and **Biweekly** provide options for you to select on which day(s)of the week the report should repeat. @@ -46,9 +46,9 @@ This article describes how to create, view, and share a custom report in CloudKn The following message displays across the top of the screen in green if the download is successful: **Report has been created**. The report name appears in the **Reports** table. -## View a custom report +## View a custom report -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab. 
The **Custom Reports** tab displays the following information in the **Reports** table: @@ -63,21 +63,21 @@ The report name appears in the **Reports** table. ## Share a custom report -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab. 1. In the **Reports** table, select a report and then select the ellipses (**...**) icon. 1. In the **Report Settings** box, select **Share with**. -1. In the **Search Email to add** box, enter the name of other CloudKnox user(s). +1. In the **Search Email to add** box, enter the name of other Permissions Management user(s). - You can only share reports with other CloudKnox users. + You can only share reports with other Permissions Management users. 1. Select **Save**. -## Search for a custom report +## Search for a custom report -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab. 1. On the **Custom Reports** tab, select **Search**. 1. In the **Search** box, enter the name of the report you want. - The **Custom Reports** tab displays a list of reports that match your search criteria. + The **Custom Reports** tab displays a list of reports that match your search criteria. 1. Select the report you want. 1. To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. 1. To refresh the list of reports, select **Reload**. @@ -85,44 +85,44 @@ The report name appears in the **Reports** table. ## Modify a saved or scheduled custom report -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Custom Reports** subtab. +1. 
In the Permissions Management home page, select the **Reports** tab, and then select the **Custom Reports** subtab. 1. Hover over the report name on the **Custom Reports** tab. - To rename the report, select **Edit** (the pencil icon), and enter a new name. - To change the settings for your report, select **Settings** (the gear icon). Make your changes, and then select **Save**. - - To download a copy of the report, select the **Down arrow** icon. + - To download a copy of the report, select the **Down arrow** icon. 1. To perform other actions to the report, select the ellipses (**...**) icon: - - **Download**: Downloads a copy of the report. + - **Download**: Downloads a copy of the report. - - **Report Settings**: Displays the settings for the report, including scheduling, sharing the report, and so on. + - **Report Settings**: Displays the settings for the report, including scheduling, sharing the report, and so on. - - **Duplicate**: Creates a duplicate of the report called **"Copy of XXX"**. Any reports not created by the current user are listed as **Duplicate**. + - **Duplicate**: Creates a duplicate of the report called **"Copy of XXX"**. Any reports not created by the current user are listed as **Duplicate**. - When you select **Duplicate**, a box appears asking if you're sure you want to create a duplicate. Select **Confirm**. + When you select **Duplicate**, a box appears asking if you're sure you want to create a duplicate. Select **Confirm**. When the report is successfully duplicated, the following message displays: **Report generated successfully**. - - **API Settings**: Download the report using your Application Programming Interface (API) settings. + - **API Settings**: Download the report using your Application Programming Interface (API) settings. - When this option is selected, the **API Settings** window opens and displays the **Report ID** and **Secret Key**. Select **Generate New Key**. 
+ When this option is selected, the **API Settings** window opens and displays the **Report ID** and **Secret Key**. Select **Generate New Key**. - - **Delete**: Select this option to delete the report. + - **Delete**: Select this option to delete the report. - After selecting **Delete**, a pop-up box appears asking if the user is sure they want to delete the report. Select **Confirm**. + After selecting **Delete**, a pop-up box appears asking if the user is sure they want to delete the report. Select **Confirm**. **Report is deleted successfully** appears across the top of the screen in green if successfully deleted. - - **Unsubscribe**: Unsubscribe the user from receiving scheduled reports and notifications. + - **Unsubscribe**: Unsubscribe the user from receiving scheduled reports and notifications. - This option is only available after a report has been scheduled. + This option is only available after a report has been scheduled. ## Next steps -- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](cloudknox-product-reports.md). -- For a detailed overview of available system reports, see [View a list and description of system reports](cloudknox-all-reports.md). -- For information about how to generate and view a system report, see [Generate and view a system report](cloudknox-report-view-system-report.md). -- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). +- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](product-reports.md). +- For a detailed overview of available system reports, see [View a list and description of system reports](all-reports.md). 
+- For information about how to generate and view a system report, see [Generate and view a system report](report-view-system-report.md). +- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-view-system-report.md b/articles/active-directory/cloud-infrastructure-entitlement-management/report-view-system-report.md similarity index 76% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-view-system-report.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/report-view-system-report.md index 35563a9b5634..d93af027fb2f 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-report-view-system-report.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/report-view-system-report.md @@ -1,6 +1,6 @@ --- -title: Generate and view a system report in CloudKnox Permissions Management -description: How to generate and view a system report in the CloudKnox Permissions Management. +title: Generate and view a system report in Permissions Management +description: How to generate and view a system report in the Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,14 +15,14 @@ ms.author: kenwith # Generate and view a system report > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
-This article describes how to generate and view a system report in CloudKnox Permissions Management (CloudKnox). +This article describes how to generate and view a system report in Permissions Management. -## Generate a system report +## Generate a system report -1. In the CloudKnox home page, select the **Reports** tab, and then select the **Systems Reports** subtab. +1. In the Permissions Management home page, select the **Reports** tab, and then select the **Systems Reports** subtab. The **Systems Reports** subtab displays the following options in the **Reports** table: - **Report Name**: The name of the report. @@ -41,12 +41,12 @@ This article describes how to generate and view a system report in CloudKnox Per 1. To refresh the list of reports, select **Reload**. -## Search for a system report +## Search for a system report 1. On the **Systems Reports** subtab, select **Search**. 1. In the **Search** box, enter the name of the report you want. - The **Systems Reports** subtab displays a list of reports that match your search criteria. + The **Systems Reports** subtab displays a list of reports that match your search criteria. 1. Select a report from the **Report Name** column. 1. To download a report, select the down arrow to the right of the report name, or from the ellipses **(...)** menu, select **Download**. 1. To refresh the list of reports, select **Reload**. @@ -54,7 +54,7 @@ This article describes how to generate and view a system report in CloudKnox Per ## Next steps -- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](cloudknox-product-reports.md). -- For a detailed overview of available system reports, see [View a list and description of system reports](cloudknox-all-reports.md). -- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](cloudknox-report-view-system-report.md). 
-- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](cloudknox-product-permissions-analytics-reports.md). +- For information on how to view system reports in the **Reports** dashboard, see [View system reports in the Reports dashboard](product-reports.md). +- For a detailed overview of available system reports, see [View a list and description of system reports](all-reports.md). +- For information about how to create, view, and share a system report, see [Create, view, and share a custom report](report-view-system-report.md). +- For information about how to create and view the Permissions analytics report, see [Generate and download the Permissions analytics report](product-permissions-analytics-reports.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-training-videos.md b/articles/active-directory/cloud-infrastructure-entitlement-management/training-videos.md similarity index 91% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-training-videos.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/training-videos.md index 5e92b74f6f40..7c4b7650af8a 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-training-videos.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/training-videos.md @@ -36,6 +36,6 @@ To view a video on how to configure and onboard Google Cloud Platform (GCP) acco ## Next steps -- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](cloudknox-overview.md) -- For a list of frequently asked questions (FAQs) about CloudKnox, see [FAQs](cloudknox-faqs.md). -- For information on how to start viewing information about your authorization system in CloudKnox, see [View key statistics and data about your authorization system](cloudknox-ui-dashboard.md). 
\ No newline at end of file +- For an overview of CloudKnox, see [What's CloudKnox Permissions Management?](overview.md) +- For a list of frequently asked questions (FAQs) about CloudKnox, see [FAQs](faqs.md). +- For information on how to start viewing information about your authorization system in CloudKnox, see [View key statistics and data about your authorization system](ui-dashboard.md). \ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-troubleshoot.md b/articles/active-directory/cloud-infrastructure-entitlement-management/troubleshoot.md similarity index 59% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-troubleshoot.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/troubleshoot.md index 8d685638b9e0..fe392e6558e7 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-troubleshoot.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/troubleshoot.md @@ -1,6 +1,6 @@ --- -title: Troubleshoot issues with CloudKnox Permissions Management -description: Troubleshoot issues with CloudKnox Permissions Management +title: Troubleshoot issues with Permissions Management +description: Troubleshoot issues with Permissions Management services: active-directory author: kenwith manager: rkarlin @@ -12,31 +12,31 @@ ms.date: 02/23/2022 ms.author: kenwith --- -# Troubleshoot issues with CloudKnox Permissions Management +# Troubleshoot issues with Permissions Management > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
-This section answers troubleshoot issues with CloudKnox Permissions Management (CloudKnox). +This section describes how to troubleshoot issues with Permissions Management. ## One time passcode (OTP) email ### The user didn't receive the OTP email. -- Check your junk or Spam mail folder for the email. +- Check your junk or Spam mail folder for the email. ## Reports ### The individual files are generated according to the authorization system (subscription/account/project). -- Select the **Collate** option in the **Custom Report** screen in the CloudKnox **Reports** tab. +- Select the **Collate** option in the **Custom Report** screen in the Permissions Management **Reports** tab. ## Data collection in AWS -### Data collection > AWS Authorization system data collection status is offline. Upload and transform is also offline. +### Data collection > AWS Authorization system data collection status is offline. Upload and transform is also offline. -- Check the CloudKnox-related role that exists in these accounts. -- Validate the trust relationship with the OpenID Connect (OIDC) role. +- Check the Permissions Management-related role that exists in these accounts. +- Validate the trust relationship with the OpenID Connect (OIDC) role. 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-audit-trail.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-audit-trail.md similarity index 75% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-audit-trail.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/ui-audit-trail.md index 6f854bb414c7..43ee6f14eec9 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-audit-trail.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-audit-trail.md @@ -1,6 +1,6 @@ --- -title: Use queries to see how users access information in an authorization system in CloudKnox Permissions Management -description: How to use queries to see how users access information in an authorization system in CloudKnox Permissions Management. +title: Use queries to see how users access information in an authorization system in Permissions Management +description: How to use queries to see how users access information in an authorization system in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,21 +15,21 @@ ms.author: kenwith # Use queries to see how users access information > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Audit** dashboard in CloudKnox Permissions Management (CloudKnox) provides an overview of queries a CloudKnox user has created to review how users access their authorization systems and accounts. 
+The **Audit** dashboard in Permissions Management provides an overview of queries a Permissions Management user has created to review how users access their authorization systems and accounts. This article provides an overview of the components of the **Audit** dashboard. ## View information in the Audit dashboard -1. In CloudKnox, select the **Audit** tab. +1. In Permissions Management, select the **Audit** tab. - CloudKnox displays the query options available to you. + Permissions Management displays the query options available to you. -1. The following options display at the top of the **Audit** dashboard: +1. The following options display at the top of the **Audit** dashboard: - A tab for each existing query. Select the tab to see details about the query. - **New Query**: Select the tab to create a new query. @@ -39,9 +39,9 @@ This article provides an overview of the components of the **Audit** dashboard. 1. To return to the main page, select **Back to Audit Trail**. -## Use a query to view information +## Use a query to view information -1. In CloudKnox, select the **Audit** tab. +1. In Permissions Management, select the **Audit** tab. 1. The **New query** tab displays the following options: - **Authorization Systems Type**: A list of your authorization systems: Amazon Web Services (**AWS**), Microsoft Azure (**Azure**), Google Cloud Platform (**GCP**), or Platform (**Platform**). @@ -51,7 +51,7 @@ This article provides an overview of the components of the **Audit** dashboard. - To display a **List** of accounts and **Folders** in the authorization system, select the down arrow, and then select **Apply**. 1. To add an **Audit Trail Condition**, select **Conditions** (the eye icon), select the conditions you want to add, and then select **Close**. - + 1. To edit existing parameters, select **Edit** (the pencil icon). 1. To add the parameter that you created to the query, select **Add**. 
@@ -70,6 +70,6 @@ This article provides an overview of the components of the **Audit** dashboard. ## Next steps -- For information on how to filter and view user activity, see [Filter and query user activity](cloudknox-product-audit-trail.md). -- For information on how to create a query,see [Create a custom query](cloudknox-howto-create-custom-queries.md). -- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](cloudknox-howto-audit-trail-results.md). +- For information on how to filter and view user activity, see [Filter and query user activity](product-audit-trail.md). +- For information on how to create a query, see [Create a custom query](how-to-create-custom-queries.md). +- For information on how to generate an on-demand report from a query, see [Generate an on-demand report from a query](how-to-audit-trail-results.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-autopilot.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-autopilot.md similarity index 75% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-autopilot.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/ui-autopilot.md index 586576497eb4..0d4f53e45ee7 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-autopilot.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-autopilot.md @@ -1,6 +1,6 @@ --- -title: View rules in the Autopilot dashboard in CloudKnox Permissions Management -description: How to view rules in the Autopilot dashboard in CloudKnox Permissions Management. +title: View rules in the Autopilot dashboard in Permissions Management +description: How to view rules in the Autopilot dashboard in Permissions Management. 
services: active-directory author: kenwith manager: rkarlin @@ -15,27 +15,27 @@ ms.author: kenwith # View rules in the Autopilot dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Autopilot** dashboard in CloudKnox Permissions Management (CloudKnox) provides a table of information about **Autopilot rules** for administrators. +The **Autopilot** dashboard in Permissions Management provides a table of information about **Autopilot rules** for administrators. > [!NOTE] > Only users with the **Administrator** role can view and make changes on this tab. -## View a list of rules +## View a list of rules -1. In the CloudKnox home page, select the **Autopilot** tab. +1. In the Permissions Management home page, select the **Autopilot** tab. 1. In the **Autopilot** dashboard, from the **Authorization system types** dropdown, select the authorization system types you want: Amazon Web Services (**AWS**), Microsoft **Azure**, or Google Cloud Platform (**GCP**). 1. From the **Authorization System** dropdown, in the **List** and **Folders** box, select the account and folder names that you want. -1. Select **Apply**. +1. Select **Apply**. The following information displays in the **Autopilot Rules** table: - **Rule Name**: The name of the rule. - **State**: The status of the rule: idle (not being use) or active (being used). - - **Rule Type**: The type of rule being applied. + - **Rule Type**: The type of rule being applied. - **Mode**: The status of the mode: on-demand or not. - **Last Generated**: The date and time the rule was last generated. - **Created By**: The email address of the user who created the rule. 
@@ -49,7 +49,7 @@ The **Autopilot** dashboard in CloudKnox Permissions Management (CloudKnox) prov The following options are available: - **View Rule**: Select to view details of the rule. - - **Delete Rule**: Select to delete the rule. Only the user who created the selected rule can delete the rule. + - **Delete Rule**: Select to delete the rule. Only the user who created the selected rule can delete the rule. - **Generate Recommendations**: Creates recommendations for each user and the authorization system. Only the user who created the selected rule can create recommendations. - **View Recommendations**: Displays the recommendations for each user and authorization system. - **Notification Settings**: Displays the users subscribed to this rule. Only the user who created the selected rule can add other users to be notified. @@ -59,13 +59,13 @@ You can also select: - **Reload**: Select to refresh the displayed list of roles/policies. - **Search**: Select to search for a specific role/policy. - **Columns**: From the dropdown list, select the columns you want to display. - - Select **Reset to default** to return to the system defaults. -- **New Rule**: Select to create a new rule. For more information, see [Create a rule](cloudknox-howto-create-rule.md). + - Select **Reset to default** to return to the system defaults. +- **New Rule**: Select to create a new rule. For more information, see [Create a rule](how-to-create-rule.md). ## Next steps -- For information about creating rules, see [Create a rule](cloudknox-howto-create-rule.md). -- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](cloudknox-howto-recommendations-rule.md). -- For information about notification settings for rules, see [View notification settings for a rule](cloudknox-howto-notifications-rule.md). +- For information about creating rules, see [Create a rule](how-to-create-rule.md). 
+- For information about generating, viewing, and applying rule recommendations for rules, see [Generate, view, and apply rule recommendations for rules](how-to-recommendations-rule.md). +- For information about notification settings for rules, see [View notification settings for a rule](how-to-notifications-rule.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-dashboard.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-dashboard.md similarity index 77% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-dashboard.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/ui-dashboard.md index f813214c6f40..23e1ce1b147b 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-dashboard.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-dashboard.md @@ -1,6 +1,6 @@ --- -title: View key statistics and data about your authorization system in CloudKnox Permissions Management -description: How to view statistics and data about your authorization system in the CloudKnox Permissions Management. +title: View key statistics and data about your authorization system in Permissions Management +description: How to view statistics and data about your authorization system in the Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -16,31 +16,31 @@ ms.author: kenwith # View key statistics and data about your authorization system > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
-CloudKnox Permissions Management (CloudKnox) provides a summary of key statistics and data about your authorization system regularly. This information is available for Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). +Permissions Management provides a summary of key statistics and data about your authorization system regularly. This information is available for Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). ## View metrics related to avoidable risk -The data provided by CloudKnox includes metrics related to avoidable risk. These metrics allow the CloudKnox administrator to identify areas where they can reduce risks related to the principle of least permissions. +The data provided by Permissions Management includes metrics related to avoidable risk. These metrics allow the Permissions Management administrator to identify areas where they can reduce risks related to the principle of least permissions. -You can view the following information in CloudKnox: +You can view the following information in Permissions Management: -- The **Permission Creep Index (PCI)** heat map on the CloudKnox **Dashboard** identifies: +- The **Permission Creep Index (PCI)** heat map on the Permissions Management **Dashboard** identifies: - The number of users who have been granted high-risk permissions but aren't using them. - The number of users who contribute to the permission creep index (PCI) and where they are on the scale. -- The [**Analytics** dashboard](cloudknox-usage-analytics-home.md) provides a snapshot of permission metrics within the last 90 days. +- The [**Analytics** dashboard](usage-analytics-home.md) provides a snapshot of permission metrics within the last 90 days. 
-## Components of the CloudKnox Dashboard +## Components of the Permissions Management Dashboard -The CloudKnox **Dashboard** displays the following information: +The Permissions Management **Dashboard** displays the following information: - **Authorization system types**: A dropdown list of authorization system types you can access: AWS, Azure, and GCP. - -- **Authorization System**: Displays a **List** of accounts and **Folders** in the selected authorization system you can access. + +- **Authorization System**: Displays a **List** of accounts and **Folders** in the selected authorization system you can access. - To add or remove accounts and folders, from the **Name** list, select or deselect accounts and folders, and then select **Apply**. @@ -48,17 +48,17 @@ The CloudKnox **Dashboard** displays the following information: The PCI graph may display one or more bubbles. Each bubble displays the number of identities that are considered high risk. *High-risk* refers to the number of users who have permissions that exceed their normal or required usage. - To display a list of the number of identities contributing to the **Low PCI**, **Medium PCI**, and **High PCI**, select the **List** icon in the upper right of the graph. - - To display the PCI graph again, select the **Graph** icon in the upper right of the list box. + - To display the PCI graph again, select the **Graph** icon in the upper right of the list box. - **Highest PCI change**: Displays a list of your accounts and information about the **PCI** and **Change** in the index over the past 7 days. - To download the list, select the down arrow in the upper right of the list box. - The following message displays: **We'll email you a link to download the file.** - - Check your email for the message from the CloudKnox Customer Success Team. The email contains a link to the **PCI history** report in Microsoft Excel format. 
+ The following message displays: **We'll email you a link to download the file.** + - Check your email for the message from the Permissions Management Customer Success Team. The email contains a link to the **PCI history** report in Microsoft Excel format. - The email also includes a link to the **Reports** dashboard, where you can configure how and when you want to receive reports automatically. - To view all the PCI changes, select **View all**. -- **Identity**: A summary of the **Findings** that includes: +- **Identity**: A summary of the **Findings** that includes: - The number of **Inactive** identities that haven't been accessed in over 90 days. - The number of **Super** identities that access data regularly. - The number of identities that can **Access secret information**: A list of roles that can access sensitive or secret information. @@ -73,7 +73,7 @@ The CloudKnox **Dashboard** displays the following information: - **Instances with access to S3 buckets** - **Unencrypted S3 buckets** - **SSE-S3 Encrypted buckets** - - **S3 Bucket accessible externally** + - **S3 Bucket accessible externally** @@ -85,7 +85,7 @@ The **Permission Creep Index** heat map shows the incurred risk of users with a - The number of resources a user has access to, otherwise known as resource reach. -- The high-risk permissions coupled with the number of resources a user has access to produce the score seen on the chart. +- The high-risk permissions coupled with the number of resources a user has access to produce the score seen on the chart. Permissions are classified as *high*, *medium*, and *low*. @@ -93,11 +93,11 @@ The **Permission Creep Index** heat map shows the incurred risk of users with a - **Medium** (displayed in yellow) - The score is between 34 and 67. The user has access to some high-risk permissions that they use, or have medium resource reach. - **Low** (displayed in green) - The score is between 0 and 33. The user has access to few high-risk permissions. 
They use all their permissions and have low resource reach. -- The number displayed on the graph shows how many users contribute to a particular score. To view detailed data about a user, hover over the number. +- The number displayed on the graph shows how many users contribute to a particular score. To view detailed data about a user, hover over the number. The distribution graph displays all the users who contribute to the permission creep. It displays how many users contribute to a particular score. For example, if the score from the PCI chart is 14, the graph shows how many users have a score of 14. -- The **PCI Trend** graph shows you the historical trend of the PCI score over the last 90 days. +- The **PCI Trend** graph shows you the historical trend of the PCI score over the last 90 days. - To download the **PCI history report**, select **Download**. ### View information on the heat map @@ -107,7 +107,7 @@ The **Permission Creep Index** heat map shows the incurred risk of users with a - The total number of **Identities** and how many of them are in the high, medium, and low categories. - The **PCI trend** over the last several weeks. -1. The **Identity** section below the heat map on the left side of the page shows all the relevant findings about identities, including roles that can access secret information, roles that are inactive, over provisioned active roles, and so on. +1. The **Identity** section below the heat map on the left side of the page shows all the relevant findings about identities, including roles that can access secret information, roles that are inactive, over provisioned active roles, and so on. - To expand the full list of identities, select **All findings**. @@ -116,7 +116,7 @@ The **Permission Creep Index** heat map shows the incurred risk of users with a ## The Analytics summary -You can also view a summary of users and activities section on the [Analytics dashboard](cloudknox-usage-analytics-home.md). 
This dashboard provides a snapshot of the following high-risk tasks or actions users have accessed, and displays the total number of users with the high-risk access, how many users are inactive or have unexecuted tasks, and how many users are active or have executed tasks: +You can also view a summary of users and activities section on the [Analytics dashboard](usage-analytics-home.md). This dashboard provides a snapshot of the following high-risk tasks or actions users have accessed, and displays the total number of users with the high-risk access, how many users are inactive or have unexecuted tasks, and how many users are active or have executed tasks: - **Users with access to high-risk tasks**: Displays the total number of users with access to a high risk task (**Total**), how many users have access but haven't used the task (**Inactive**), and how many users are actively using the task (**Active**). @@ -134,7 +134,5 @@ You can also view a summary of users and activities section on the [Analytics da ## Next steps -- For information on how to view authorization system and account activity data on the CloudKnox Dashboard, see [View data about the activity in your authorization system](cloudknox-product-dashboard.md). -- For an overview of the Analytics dashboard, see [An overview of the Analytics dashboard](cloudknox-usage-analytics-home.md). - - +- For information on how to view authorization system and account activity data on the Permissions Management Dashboard, see [View data about the activity in your authorization system](product-dashboard.md). +- For an overview of the Analytics dashboard, see [An overview of the Analytics dashboard](usage-analytics-home.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-remediation.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-remediation.md similarity index 83% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-remediation.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/ui-remediation.md index c2a38900d6ff..4d8ae893d635 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-remediation.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-remediation.md @@ -1,6 +1,6 @@ --- -title: View existing roles/policies and requests for permission in the Remediation dashboard in CloudKnox Permissions Management -description: How to view existing roles/policies and requests for permission in the Remediation dashboard in CloudKnox Permissions Management. +title: View existing roles/policies and requests for permission in the Remediation dashboard in Permissions Management +description: How to view existing roles/policies and requests for permission in the Remediation dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,41 +15,41 @@ ms.author: kenwith # View roles/policies and requests for permission in the Remediation dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Remediation** dashboard in CloudKnox Permissions Management (CloudKnox) provides an overview of roles/policies, permissions, a list of existing requests for permissions, and requests for permissions you have made. 
+The **Remediation** dashboard in Permissions Management provides an overview of roles/policies, permissions, a list of existing requests for permissions, and requests for permissions you have made. This article provides an overview of the components of the **Remediation** dashboard. > [!NOTE] -> To view the **Remediation** dashboard, your must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this dashboard, you must have **Controller** or **Administrator** permissions. If you don’t have these permissions, contact your system administrator. +> To view the **Remediation** dashboard, you must have **Viewer**, **Controller**, or **Administrator** permissions. To make changes on this dashboard, you must have **Controller** or **Administrator** permissions. If you don't have these permissions, contact your system administrator. > [!NOTE] -> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. CloudKnox automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. +> Microsoft Azure uses the term *role* for what other cloud providers call *policy*. Permissions Management automatically makes this terminology change when you select the authorization system type. In the user documentation, we use *role/policy* to refer to both. ## Display the Remediation dashboard -1. On the CloudKnox home page, select the **Remediation** tab. +1. On the Permissions Management home page, select the **Remediation** tab. - The **Remediation** dashboard includes six subtabs: + The **Remediation** dashboard includes six subtabs: - **Roles/Policies**: Use this subtab to perform Create Read Update Delete (CRUD) operations on roles/policies. - **Permissions**: Use this subtab to perform Read Update Delete (RUD) on granted permissions. - **Role/Policy Template**: Use this subtab to create a template for roles/policies template. 
- **Requests**: Use this subtab to view approved, pending, and processed Permission on Demand (POD) requests. - - **My Requests**: Use this tab to manage lifecycle of the POD request either created by you or needs your approval. + - **My Requests**: Use this tab to manage lifecycle of the POD request either created by you or needs your approval. - **Settings**: Use this subtab to select **Request Role/Policy Filters**, **Request Settings**, and **Auto-Approve** settings. 1. Use the dropdown to select the **Authorization System Type** and **Authorization System**, and then select **Apply**. -## View and create roles/policies +## View and create roles/policies The **Role/Policies** subtab provides the following settings that you can use to view and create a role/policy. - **Authorization System Type**: Displays a dropdown with authorization system types you can access, Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). - **Authorization System**: Displays a list of authorization systems accounts you can access. -- **Policy Type**: A dropdown with available role/policy types. You can select **All**, **Custom**, **System**, or **CloudKnox Only**. +- **Policy Type**: A dropdown with available role/policy types. You can select **All**, **Custom**, **System**, or **Permissions Management Only**. - **Policy Status**: A dropdown with available role/policy statuses. You can select **All**, **Assigned**, or **Unassigned**. - **Policy Usage**: A dropdown with **All** or **Unused** roles/policies. - **Apply**: Select this option to save the changes you've made. @@ -58,23 +58,23 @@ The **Role/Policies** subtab provides the following settings that you can use to The **Policy list** displays a list of existing roles/policies and the following information about each role/policy. - **Policy Name**: The name of the roles/policies available to you. 
-- **Policy Type**: **Custom**, **System**, or **CloudKnox Only** -- **Actions** +- **Policy Type**: **Custom**, **System**, or **Permissions Management Only** +- **Actions** - Select **Clone** to create a duplicate copy of the role/policy. - Select **Modify** to change the existing role/policy. - - Select **Delete** to delete the role/policy. + - Select **Delete** to delete the role/policy. Other options available to you: - **Search**: Select this option to search for a specific role/policy. - **Reload**: Select this option to refresh the displayed list of roles/policies. -- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. +- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. When the file is successfully exported, a message appears: **Exported Successfully.** - - Check your email for a message from the CloudKnox Customer Success Team. This email contains a link to: + - Check your email for a message from the Permissions Management Customer Success Team. This email contains a link to: - The **Role Policy Details** report in CSV format. - The **Reports** dashboard where you can configure how and when you can automatically receive reports. -- **Create Role/Policy**: Select this option to create a new role/policy. For more information, see [Create a role/policy](cloudknox-howto-create-role-policy.md). +- **Create Role/Policy**: Select this option to create a new role/policy. For more information, see [Create a role/policy](how-to-create-role-policy.md). ## Add filters to permissions @@ -91,11 +91,11 @@ The **Permissions** subtab provides the following settings that you can use to a - **Enter a Group Name**: A dropdown from which you can select a group name. - **Apply**: Select this option to save the changes you've made and run the filter. - **Reset Filter**: Select this option to discard the changes you've made. 
-- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. +- **Export CSV**: Select this option to export the displayed list of roles/policies as a comma-separated values (CSV) file. When the file is successfully exported, a message appears: **Exported Successfully.** - - Check your email for a message from the CloudKnox Customer Success Team. This email contains a link to: + - Check your email for a message from the Permissions Management Customer Success Team. This email contains a link to: - The **Role Policy Details** report in CSV format. - The **Reports** dashboard where you can configure how and when you can automatically receive reports. @@ -118,7 +118,7 @@ Other options available to you: - **Search**: Select this option to search for a specific role/policy. - **Reload**: Select this option to refresh the displayed list of roles/policies. -## View requests for permission +## View requests for permission Use the **Requests** tab to view a list of **Pending**, **Approved**, and **Processed** requests for permissions your team members have made. @@ -159,7 +159,7 @@ The **Pending** table displays the following information: **To return to the previous view:** -- Select the up arrow. +- Select the up arrow. ### View approved requests @@ -169,7 +169,7 @@ The **Approved** table displays information about the requests that have been ap The **Processed** table displays information about the requests that have been processed. -## View requests for permission for your approval +## View requests for permission for your approval Use the **My Requests** subtab to view a list of **Pending**, **Approved**, and **Processed** requests for permissions your team members have made and you must approve or reject. 
@@ -228,14 +228,13 @@ The **Settings** subtab provides the following settings that you can use to make ## Next steps -- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](cloudknox-ui-remediation.md). -- For information on how to create a role/policy, see [Create a role/policy](cloudknox-howto-create-role-policy.md). -- For information on how to clone a role/policy, see [Clone a role/policy](cloudknox-howto-clone-role-policy.md). -- For information on how to delete a role/policy, see [Delete a role/policy](cloudknox-howto-delete-role-policy.md). -- For information on how to modify a role/policy, see Modify a role/policy](cloudknox-howto-modify-role-policy.md). -- To view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md). -- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](cloudknox-howto-attach-detach-permissions.md). -- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](cloudknox-howto-revoke-task-readonly-status.md) -- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](cloudknox-howto-create-approve-privilege-request.md). -- For information on how to view information about roles/policies, see [View information about roles/policies](cloudknox-howto-view-role-policy.md) - +- For information on how to view existing roles/policies, requests, and permissions, see [View roles/policies, requests, and permission in the Remediation dashboard](ui-remediation.md). +- For information on how to create a role/policy, see [Create a role/policy](how-to-create-role-policy.md). 
+- For information on how to clone a role/policy, see [Clone a role/policy](how-to-clone-role-policy.md). +- For information on how to delete a role/policy, see [Delete a role/policy](how-to-delete-role-policy.md). +- For information on how to modify a role/policy, see [Modify a role/policy](how-to-modify-role-policy.md). +- To view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md). +- For information on how to attach and detach permissions for AWS identities, see [Attach and detach policies for AWS identities](how-to-attach-detach-permissions.md). +- For information on how to revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities, see [Revoke high-risk and unused tasks or assign read-only status for Azure and GCP identities](how-to-revoke-task-readonly-status.md) +- For information on how to create or approve a request for permissions, see [Create or approve a request for permissions](how-to-create-approve-privilege-request.md). 
+- For information on how to view information about roles/policies, see [View information about roles/policies](how-to-view-role-policy.md) diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-tasks.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-tasks.md similarity index 63% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-tasks.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/ui-tasks.md index bd11caa9bb3b..c4d6d89960a9 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-tasks.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-tasks.md @@ -1,6 +1,6 @@ --- -title: View information about active and completed tasks in CloudKnox Permissions Management -description: How to view information about active and completed tasks in the Activities pane in CloudKnox Permissions Management. +title: View information about active and completed tasks in Permissions Management +description: How to view information about active and completed tasks in the Activities pane in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,24 +15,24 @@ ms.author: kenwith # View information about active and completed tasks > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes the usage of the **CloudKnox Tasks** pane in CloudKnox Permissions Management (CloudKnox). +This article describes the usage of the **Permissions Management Tasks** pane in Permissions Management. ## Display active and completed tasks -1. 
In the CloudKnox home page, select **Tasks** (the timer icon). +1. In the Permissions Management home page, select **Tasks** (the timer icon). - The **CloudKnox Tasks** pane appears on the right of the CloudKnox home page. It has two tabs: + The **Permissions Management Tasks** pane appears on the right of the Permissions Management home page. It has two tabs: - **Active**: Displays a list of active tasks, a description of each task, and when the task was started. If there are no active tasks, the following message displays: **There are no active tasks**. - **Completed**: Displays a list of completed tasks, a description of each task, when the task was started and ended, and whether the task **Failed** or **Succeeded**. If there are no completed activities, the following message displays: **There are no recently completed tasks**. -1. To close the **CloudKnox Tasks** pane, click outside the pane. +1. To close the **Permissions Management Tasks** pane, click outside the pane. ## Next steps -- For information on how to create a role/policy in the **Remediation** dashboard, see [Create a role/policy](cloudknox-howto-create-role-policy.md). +- For information on how to create a role/policy in the **Remediation** dashboard, see [Create a role/policy](how-to-create-role-policy.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-triggers.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-triggers.md similarity index 78% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-triggers.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/ui-triggers.md index c0faaaaba109..01471aee6f3a 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-triggers.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-triggers.md @@ -1,6 +1,6 @@ --- -title: View information about activity triggers in CloudKnox Permissions Management -description: How to view information about activity triggers in the Activity triggers dashboard in CloudKnox Permissions Management. +title: View information about activity triggers in Permissions Management +description: How to view information about activity triggers in the Activity triggers dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,14 +15,14 @@ ms.author: kenwith # View information about activity triggers > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how to use the **Activity triggers** dashboard in CloudKnox Permissions Management (CloudKnox) to view information about activity alerts and triggers. +This article describes how to use the **Activity triggers** dashboard in Permissions Management to view information about activity alerts and triggers. 
## Display the Activity triggers dashboard -- In the CloudKnox home page, select **Activity triggers** (the bell icon). +- In the Permissions Management home page, select **Activity triggers** (the bell icon). The **Activity triggers** dashboard has four tabs: @@ -46,8 +46,8 @@ The **Alerts** subtab in the **Activity**, **Rule-Based Anomaly**, **Statistical - If you select **Custom Range**, also enter **From** and **To** duration settings. - **Apply**: Select this option to activate your settings. - **Reset Filter**: Select this option to discard your settings. -- **Reload**: Select this option to refresh the displayed information. -- **Create Activity Trigger**: Select this option to [create a new alert trigger](cloudknox-howto-create-alert-trigger.md). +- **Reload**: Select this option to refresh the displayed information. +- **Create Activity Trigger**: Select this option to [create a new alert trigger](how-to-create-alert-trigger.md). - The **Alerts** table displays a list of alerts with the following information: - **Alerts**: The name of the alert. - **# of users subscribed**: The number of users who have subscribed to the alert. @@ -64,10 +64,10 @@ The **Rule-Based Anomaly** tab and the **Statistical Anomaly** tab both have one The **Alert Triggers** subtab in the **Activity**, **Rule-Based Anomaly**, **Statistical Anomaly**, and **Permission Analytics** tab displays the following information: - **Status**: Select the alert status you want to display: **All**, **Activated**, or **Deactivated**. -- **Apply**: Select this option to activate your settings. +- **Apply**: Select this option to activate your settings. - **Reset Filter**: Select this option to discard your settings. -- **Reload**: Select **Reload** to refresh the displayed information. -- **Create Activity Trigger**: Select this option to [create a new alert trigger](cloudknox-howto-create-alert-trigger.md). +- **Reload**: Select **Reload** to refresh the displayed information. 
+- **Create Activity Trigger**: Select this option to [create a new alert trigger](how-to-create-alert-trigger.md). - The **Triggers** table displays a list of triggers with the following information: - **Alerts**: The name of the alert. - **# of users subscribed**: The number of users who have subscribed to the alert. @@ -81,7 +81,7 @@ The **Alert Triggers** subtab in the **Activity**, **Rule-Based Anomaly**, **Sta ## Next steps -- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](cloudknox-howto-create-alert-trigger.md). -- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](cloudknox-product-rule-based-anomalies.md). -- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](cloudknox-product-statistical-anomalies.md). -- For information on permission analytics triggers, see [Create and view permission analytics triggers](cloudknox-product-permission-analytics.md). +- For information on activity alerts and alert triggers, see [Create and view activity alerts and alert triggers](how-to-create-alert-trigger.md). +- For information on rule-based anomalies and anomaly triggers, see [Create and view rule-based anomalies and anomaly triggers](product-rule-based-anomalies.md). +- For information on finding outliers in identity's behavior, see [Create and view statistical anomalies and anomaly triggers](product-statistical-anomalies.md). +- For information on permission analytics triggers, see [Create and view permission analytics triggers](product-permission-analytics.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-user-management.md b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-user-management.md similarity index 82% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-user-management.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/ui-user-management.md index 010ce9de7b7e..0ac386906b41 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-ui-user-management.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/ui-user-management.md @@ -1,6 +1,6 @@ --- -title: Manage users and groups with the User management dashboard in CloudKnox Permissions Management -description: How to manage users and groups in the User management dashboard in CloudKnox Permissions Management. +title: Manage users and groups with the User management dashboard in Permissions Management +description: How to manage users and groups in the User management dashboard in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,14 +15,14 @@ ms.author: kenwith # Manage users and groups with the User management dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -This article describes how to use the CloudKnox Permissions Management (CloudKnox) **User management** dashboard to view and manage users and groups. +This article describes how to use the Permissions Management **User management** dashboard to view and manage users and groups. 
**To display the User management dashboard**: -- In the upper right of the CloudKnox home page, select **User** (your initials) in the upper right of the screen, and then select **User management.** +- In the upper right of the Permissions Management home page, select **User** (your initials) in the upper right of the screen, and then select **User management.** The **User Management** dashboard has two tabs: @@ -30,7 +30,7 @@ This article describes how to use the CloudKnox Permissions Management (CloudKno - **Groups**: Displays information about groups. ## Manage users - + Use the **Users** tab to display the following information about users: - **Name** and **Email Address**: The user's name and email address. @@ -49,11 +49,11 @@ You can also select the following options: - **Search**: Enter a name or email address to search for a specific user. ## Manage groups - + Use the **Groups** tab to display the following information about groups: - **Name**: Displays the registered user's name and email address. -- **Permissions**: +- **Permissions**: - The **Authorization Systems** and the type of permissions the user has been granted: **Admin for all Authorization System Types**, **Admin for selected Authorization System Types**, or **Custom**. - Information about the **Viewer**, **Controller**, **Approver**, and **Requestor**. - **Modified By**: The email address of the user who modified the group. @@ -69,7 +69,7 @@ Use the **Groups** tab to display the following information about groups: - **Edit Permissions**: Select this option to modify the group's permissions. - **Delete**: Select this option to delete the group's permissions. - The **Delete Permission** box asks you to confirm that you want to delete the group. + The **Delete Permission** box asks you to confirm that you want to delete the group. - Select **Delete** if you want to delete the group, **Cancel** to discard your changes. 
@@ -77,13 +77,13 @@ You can also select the following options: - **Reload**: Select this option to refresh the information displayed in the **User** table. - **Search**: Enter a name or email address to search for a specific user. -- **Filters**: Select the authorization systems and accounts you want to display. -- **Create Permission**: Create a group and set up its permissions. For more information, see [Create group-based permissions](cloudknox-howto-create-group-based-permissions.md) +- **Filters**: Select the authorization systems and accounts you want to display. +- **Create Permission**: Create a group and set up its permissions. For more information, see [Create group-based permissions](how-to-create-group-based-permissions.md) ## Next steps -- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](cloudknox-ui-tasks.md). -- For information about how to view personal and organization information, see [View personal and organization information](cloudknox-product-account-settings.md). -- For information about how to select group-based permissions settings, see [Select group-based permissions settings](cloudknox-howto-create-group-based-permissions.md). +- For information about how to view information about active and completed tasks, see [View information about active and completed tasks](ui-tasks.md). +- For information about how to view personal and organization information, see [View personal and organization information](product-account-settings.md). +- For information about how to select group-based permissions settings, see [Select group-based permissions settings](how-to-create-group-based-permissions.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-access-keys.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-access-keys.md similarity index 83% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-access-keys.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-access-keys.md index 2d8b54bda220..c2677c84e9cf 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-access-keys.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-access-keys.md @@ -1,6 +1,6 @@ --- -title: View analytic information about access keys in CloudKnox Permissions Management -description: How to view analytic information about access keys in CloudKnox Permissions Management. +title: View analytic information about access keys in Permissions Management +description: How to view analytic information about access keys in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,10 +15,10 @@ ms.author: kenwith # View analytic information about access keys > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) provides details about identities, resources, and tasks that you can use make informed decisions about granting permissions, and reducing risk on unused permissions. 
+The **Analytics** dashboard in Permissions Management provides details about identities, resources, and tasks that you can use to make informed decisions about granting permissions, and reducing risk on unused permissions. - **Users**: Tracks assigned permissions and usage of various identities. - **Groups**: Tracks assigned permissions and usage of the group and the group members. @@ -31,9 +31,9 @@ This article describes how to view usage analytics about access keys. ## Create a query to view access keys -When you select **Access keys**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. +When you select **Access keys**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. -1. On the main **Analytics** dashboard, select **Access Keys** from the drop-down list at the top of the screen. +1. On the main **Analytics** dashboard, select **Access Keys** from the drop-down list at the top of the screen. The following components make up the **Access Keys** dashboard: @@ -53,8 +53,8 @@ When you select **Access keys**, the **Analytics** dashboard provides a high-lev The **Access Keys** table displays the results of your query. -- **Access Key ID**: Provides the ID for the access key. - - To view details about the access keys, select the down arrow to the left of the ID. +- **Access Key ID**: Provides the ID for the access key. + - To view details about the access keys, select the down arrow to the left of the ID. - The **Owner** name. - The **Account** number. - The **Permission Creep Index (PCI)**: Provides the following information: @@ -65,26 +65,26 @@ The **Access Keys** table displays the results of your query. - **Access Key Age**: How old the access key is, in days. - **Last Used**: How long ago the access key was last accessed. 
-## Apply filters to your query +## Apply filters to your query -There are many filter options within the **Active Tasks** screen, including filters by **Authorization System**, filters by **User** and filters by **Task**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. +There are many filter options within the **Active Tasks** screen, including filters by **Authorization System**, filters by **User** and filters by **Task**. +Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. ### Apply filters by authorization system type 1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. ### Apply filters by authorization system -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. ### Apply filters by key status @@ -128,12 +128,12 @@ Filters can be applied in one, two, or all three categories depending on the typ ## Export the results of your query -- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV** or **CSV (Detailed)**. 
+- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV** or **CSV (Detailed)**. ## Next steps -- To view active tasks, see [View usage analytics about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage by users, see [View usage analytics about users](cloudknox-usage-analytics-users.md). -- To view assigned permissions and usage of the group and the group members, see [View usage analytics about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View usage analytics about active resources](cloudknox-usage-analytics-active-resources.md). -- To view assigned permissions and usage of the serverless functions, see [View usage analytics about serverless functions](cloudknox-usage-analytics-serverless-functions.md). +- To view active tasks, see [View usage analytics about active tasks](usage-analytics-active-tasks.md). +- To view assigned permissions and usage by users, see [View usage analytics about users](usage-analytics-users.md). +- To view assigned permissions and usage of the group and the group members, see [View usage analytics about groups](usage-analytics-groups.md). +- To view active resources, see [View usage analytics about active resources](usage-analytics-active-resources.md). +- To view assigned permissions and usage of the serverless functions, see [View usage analytics about serverless functions](usage-analytics-serverless-functions.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-resources.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-resources.md similarity index 80% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-resources.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-resources.md index e42aa721e001..d05b4f4b4898 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-resources.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-resources.md @@ -1,6 +1,6 @@ --- -title: View analytic information about active resources in CloudKnox Permissions Management -description: How to view usage analytics about active resources in CloudKnox Permissions Management. +title: View analytic information about active resources in Permissions Management +description: How to view usage analytics about active resources in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,10 +15,10 @@ ms.author: kenwith # View analytic information about active resources > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. 
System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: +The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - **Users**: Tracks assigned permissions and usage of various identities. - **Groups**: Tracks assigned permissions and usage of the group and the group members. @@ -31,7 +31,7 @@ This article describes how to view usage analytics about active resources. ## Create a query to view active resources -1. On the main **Analytics** dashboard, select **Active Resources** from the drop-down list at the top of the screen. +1. On the main **Analytics** dashboard, select **Active Resources** from the drop-down list at the top of the screen. The dashboard only lists tasks that are active. The following components make up the **Active Resources** dashboard: 1. From the dropdowns, select: @@ -50,8 +50,8 @@ This article describes how to view usage analytics about active resources. The **Active Resources** table displays the results of your query: -- **Resource Name**: Provides the name of the task. - - To view details about the task, select the down arrow. +- **Resource Name**: Provides the name of the task. + - To view details about the task, select the down arrow. - **Account**: The name of the account. - **Resources Type**: The type of resources used, for example, **bucket** or **key**. - **Tasks**: Displays the number of **Granted** and **Executed** tasks. @@ -68,26 +68,26 @@ The **Active Resources** table displays the results of your query: 1. To add the tag to the serverless function, select **Add Tag**. 
-## Apply filters to your query +## Apply filters to your query -There are many filter options within the **Active Resources** screen, including filters by **Authorization System**, filters by **User** and filters by **Task**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. +There are many filter options within the **Active Resources** screen, including filters by **Authorization System**, filters by **User** and filters by **Task**. +Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. ### Apply filters by authorization system 1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. ### Apply filters by authorization system type -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. From the **Authorization System** dropdown, select from a **List** of accounts and **Folders**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. ### Apply filters by task type @@ -114,13 +114,13 @@ You can filter user details by type of user, user role, app, or service used, or ## Export the results of your query -- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. 
+- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. ## Next steps -- To track active tasks, see [View usage analytics about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To track assigned permissions and usage of users, see [View usage analytics about users](cloudknox-usage-analytics-users.md). -- To track assigned permissions and usage of the group and the group members, see [View usage analytics about groups](cloudknox-usage-analytics-groups.md). -- To track the permission usage of access keys for a given user, see [View usage analytics about access keys](cloudknox-usage-analytics-access-keys.md). -- To track assigned permissions and usage of the serverless functions, see [View usage analytics about serverless functions](cloudknox-usage-analytics-serverless-functions.md). +- To track active tasks, see [View usage analytics about active tasks](usage-analytics-active-tasks.md). +- To track assigned permissions and usage of users, see [View usage analytics about users](usage-analytics-users.md). +- To track assigned permissions and usage of the group and the group members, see [View usage analytics about groups](usage-analytics-groups.md). +- To track the permission usage of access keys for a given user, see [View usage analytics about access keys](usage-analytics-access-keys.md). +- To track assigned permissions and usage of the serverless functions, see [View usage analytics about serverless functions](usage-analytics-serverless-functions.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-tasks.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-tasks.md similarity index 77% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-tasks.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-tasks.md index e0e6679f637f..729df078b99d 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-active-tasks.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-active-tasks.md @@ -1,6 +1,6 @@ --- -title: View analytic information about active tasks in CloudKnox Permissions Management -description: How to view analytic information about active tasks in CloudKnox Permissions Management. +title: View analytic information about active tasks in Permissions Management +description: How to view analytic information about active tasks in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,10 +15,10 @@ ms.author: kenwith # View analytic information about active tasks > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. 
System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: +The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - **Users**: Tracks assigned permissions and usage of various identities. - **Groups**: Tracks assigned permissions and usage of the group and the group members. @@ -31,9 +31,9 @@ This article describes how to view usage analytics about active tasks. ## Create a query to view active tasks -When you select **Active Tasks**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. +When you select **Active Tasks**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. -1. On the main **Analytics** dashboard, select **Active Tasks** from the drop-down list at the top of the screen. +1. On the main **Analytics** dashboard, select **Active Tasks** from the drop-down list at the top of the screen. The dashboard only lists tasks that are active. The following components make up the **Active Tasks** dashboard: @@ -51,12 +51,12 @@ When you select **Active Tasks**, the **Analytics** dashboard provides a high-le The **Active Tasks** table displays the results of your query. -- **Task Name**: Provides the name of the task. - - To view details about the task, select the down arrow in the table. +- **Task Name**: Provides the name of the task. + - To view details about the task, select the down arrow in the table. - A **Normal Task** icon displays to the left of the task name if the task is normal (that is, not risky). - - A **Deleted Task** icon displays to the left of the task name if the task involved deleting data. 
- - A **High-Risk Task** icon displays to the left of the task name if the task is high-risk. + - A **Deleted Task** icon displays to the left of the task name if the task involved deleting data. + - A **High-Risk Task** icon displays to the left of the task name if the task is high-risk. - **Performed on (resources)**: The number of resources on which the task was used. @@ -65,25 +65,25 @@ The **Active Tasks** table displays the results of your query. - **Accessed**: Displays the number of users that have accessed the task. -## Apply filters to your query +## Apply filters to your query -There are many filter options within the **Active Tasks** screen, including **Authorization System**, **User**, and **Task**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. +There are many filter options within the **Active Tasks** screen, including **Authorization System**, **User**, and **Task**. +Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. ### Apply filters by authorization system type 1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. ### Apply filters by authorization system -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. 
+ Select **Reset Filter** to discard your changes. ### Apply filters by task type @@ -100,12 +100,12 @@ You can filter user details by type of user, user role, app, or service used, or ## Export the results of your query -- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. +- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. ## Next steps -- To view assigned permissions and usage by users, see [View analytic information about users](cloudknox-usage-analytics-users.md). -- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). -- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). -- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). +- To view assigned permissions and usage by users, see [View analytic information about users](usage-analytics-users.md). +- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](usage-analytics-groups.md). +- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md). +- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md). +- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-groups.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-groups.md similarity index 83% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-groups.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-groups.md index f53777999454..11894bc662e3 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-groups.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-groups.md @@ -1,6 +1,6 @@ --- -title: View analytic information about groups in CloudKnox Permissions Management -description: How to view analytic information about groups in CloudKnox Permissions Management. +title: View analytic information about groups in Permissions Management +description: How to view analytic information about groups in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,10 +15,10 @@ ms.author: kenwith # View analytic information about groups > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. 
System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: +The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - **Users**: Tracks assigned permissions and usage of various identities. - **Groups**: Tracks assigned permissions and usage of the group and the group members. @@ -31,9 +31,9 @@ This article describes how to view usage analytics about groups. ## Create a query to view groups -When you select **Groups**, the **Usage Analytics** dashboard provides a high-level overview of groups. +When you select **Groups**, the **Usage Analytics** dashboard provides a high-level overview of groups. -1. On the main **Analytics** dashboard, select **Groups** from the drop-down list at the top of the screen. +1. On the main **Analytics** dashboard, select **Groups** from the drop-down list at the top of the screen. The following components make up the **Groups** dashboard: @@ -51,8 +51,8 @@ When you select **Groups**, the **Usage Analytics** dashboard provides a high-le The **Groups** table displays the results of your query: -- **Group Name**: Provides the name of the group. - - To view details about the group, select the down arrow. +- **Group Name**: Provides the name of the group. + - To view details about the group, select the down arrow. - A **Group Type** icon displays to the left of the group name to describe the type of group (**ED** or **Local**). - The **Domain/Account** name. - The **Permission Creep Index (PCI)**: Provides the following information: @@ -83,25 +83,25 @@ The **Groups** table displays the results of your query: 1. From the **Tasks** dropdown, select **All Tasks**, **High Risk Tasks**, and **Delete Tasks**. 1. 
The pane on the right displays a list of **Users**, **Policies** for **AWS** and **Roles** for **GCP or AZURE**, and **Tags**. -## Apply filters to your query +## Apply filters to your query -There are many filter options within the **Groups** screen, including filters by **Authorization System Type**, **Authorization System**, **Group Type**, **Group Activity Status**, and **Tasks Type**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. +There are many filter options within the **Groups** screen, including filters by **Authorization System Type**, **Authorization System**, **Group Type**, **Group Activity Status**, and **Tasks Type**. +Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. ### Apply filters by authorization system type 1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. ### Apply filters by authorization system -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. 
### Apply filters by group type @@ -140,15 +140,15 @@ You can filter user details by type of user, user role, app, or service used, or ## Export the results of your query -- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. -- To view a list of members of the groups in your query, select **Export**, and then select **Memberships**. +- To view a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. +- To view a list of members of the groups in your query, select **Export**, and then select **Memberships**. ## Next steps -- To view active tasks, see [View analytic information about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage by users, see [View analytic information about users](cloudknox-usage-analytics-users.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). -- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). -- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). +- To view active tasks, see [View analytic information about active tasks](usage-analytics-active-tasks.md). +- To view assigned permissions and usage by users, see [View analytic information about users](usage-analytics-users.md). +- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md). +- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md). 
+- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md). diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-home.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-home.md similarity index 61% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-home.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-home.md index 055a106e941f..e0933b95f7a1 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-home.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-home.md @@ -1,6 +1,6 @@ --- -title: View analytic information with the Analytics dashboard in CloudKnox Permissions Management -description: How to use the Analytics dashboard in CloudKnox Permissions Management to view details about users, groups, active resources, active tasks, access keys, and serverless functions. +title: View analytic information with the Analytics dashboard in Permissions Management +description: How to use the Analytics dashboard in Permissions Management to view details about users, groups, active resources, active tasks, access keys, and serverless functions. services: active-directory author: kenwith manager: rkarlin @@ -15,28 +15,28 @@ ms.author: kenwith # View analytic information with the Analytics dashboard > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. 
-This article provides a brief overview of the Analytics dashboard in CloudKnox Permissions Management (CloudKnox), and the type of analytic information it provides for Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). +This article provides a brief overview of the Analytics dashboard in Permissions Management, and the type of analytic information it provides for Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). ## Display the Analytics dashboard -- From the CloudKnox home page, select the **Analytics** tab. +- From the Permissions Management home page, select the **Analytics** tab. - The **Analytics** dashboard displays detailed information about: + The **Analytics** dashboard displays detailed information about: - - **Users**: Tracks assigned permissions and usage by users. For more information, see [View analytic information about users](cloudknox-usage-analytics-users.md). + - **Users**: Tracks assigned permissions and usage by users. For more information, see [View analytic information about users](usage-analytics-users.md). - - **Groups**: Tracks assigned permissions and usage of the group and the group members. For more information, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). + - **Groups**: Tracks assigned permissions and usage of the group and the group members. For more information, see [View analytic information about groups](usage-analytics-groups.md). - - **Active Resources**: Tracks resources that have been used in the last 90 days. For more information, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). + - **Active Resources**: Tracks resources that have been used in the last 90 days. For more information, see [View analytic information about active resources](usage-analytics-active-resources.md). - - **Active Tasks**: Tracks tasks that have been performed in the last 90 days. 
For more information, see [View analytic information about active tasks](cloudknox-usage-analytics-active-tasks.md). + - **Active Tasks**: Tracks tasks that have been performed in the last 90 days. For more information, see [View analytic information about active tasks](usage-analytics-active-tasks.md). - - **Access Keys**: Tracks the permission usage of access keys for a given user. For more information, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). + - **Access Keys**: Tracks the permission usage of access keys for a given user. For more information, see [View analytic information about access keys](usage-analytics-access-keys.md). - - **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions for AWS only. For more information, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). + - **Serverless Functions**: Tracks assigned permissions and usage of the serverless functions for AWS only. For more information, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md). System administrators can use this information to make decisions about granting permissions and reducing risk on unused permissions. @@ -44,9 +44,9 @@ This article provides a brief overview of the Analytics dashboard in CloudKnox P ## Next steps -- To view active tasks, see [View analytic information about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage by users, see [View analytic information about users](cloudknox-usage-analytics-users.md). -- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). 
-- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). -- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). \ No newline at end of file +- To view active tasks, see [View analytic information about active tasks](usage-analytics-active-tasks.md). +- To view assigned permissions and usage by users, see [View analytic information about users](usage-analytics-users.md). +- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](usage-analytics-groups.md). +- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md). +- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md). +- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md). 
\ No newline at end of file diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-serverless-functions.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-serverless-functions.md similarity index 79% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-serverless-functions.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-serverless-functions.md index 976ff2b442a0..e9d93ed26b0a 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-serverless-functions.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-serverless-functions.md @@ -1,6 +1,6 @@ --- -title: View analytic information about serverless functions in CloudKnox Permissions Management -description: How to view analytic information about serverless functions in CloudKnox Permissions Management. +title: View analytic information about serverless functions in Permissions Management +description: How to view analytic information about serverless functions in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,10 +15,10 @@ ms.author: kenwith # View analytic information about serverless functions > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. 
System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: +The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - **Users**: Tracks assigned permissions and usage of various identities. - **Groups**: Tracks assigned permissions and usage of the group and the group members. @@ -31,9 +31,9 @@ This article describes how to view usage analytics about serverless functions. ## Create a query to view serverless functions -When you select **Serverless Functions**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. +When you select **Serverless Functions**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. -1. On the main **Analytics** dashboard, select **Serverless Functions** from the dropdown list at the top of the screen. +1. On the main **Analytics** dashboard, select **Serverless Functions** from the dropdown list at the top of the screen. The following components make up the **Serverless Functions** dashboard: @@ -49,8 +49,8 @@ When you select **Serverless Functions**, the **Analytics** dashboard provides a The **Serverless Functions** table displays the results of your query. -- **Function Name**: Provides the name of the serverless function. - - To view details about a serverless function, select the down arrow to the left of the function name. +- **Function Name**: Provides the name of the serverless function. + - To view details about a serverless function, select the down arrow to the left of the function name. 
- A **Function Type** icon displays to the left of the function name to describe the type of serverless function, for example **Lambda function**. - The **Permission Creep Index (PCI)**: Provides the following information: - **Index**: A numeric value assigned to the PCI. @@ -81,32 +81,32 @@ The **Serverless Functions** table displays the results of your query. 1. From the **Tasks** dropdown, select **All Tasks**, **High Risk Tasks**, and **Delete Tasks**. -## Apply filters to your query +## Apply filters to your query -You can filter the **Serverless Functions** results by **Authorization System Type** and **Authorization System**. +You can filter the **Serverless Functions** results by **Authorization System Type** and **Authorization System**. ### Apply filters by authorization system type 1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. ### Apply filters by authorization system -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. ## Next steps -- To view active tasks, see [View usage analytics about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage by users, see [View analytic information about users](cloudknox-usage-analytics-users.md). 
-- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). -- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). +- To view active tasks, see [View usage analytics about active tasks](usage-analytics-active-tasks.md). +- To view assigned permissions and usage by users, see [View analytic information about users](usage-analytics-users.md). +- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](usage-analytics-groups.md). +- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md). +- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md). 
diff --git a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-users.md b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-users.md similarity index 83% rename from articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-users.md rename to articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-users.md index 43aea761580c..51779608d21c 100644 --- a/articles/active-directory/cloud-infrastructure-entitlement-management/cloudknox-usage-analytics-users.md +++ b/articles/active-directory/cloud-infrastructure-entitlement-management/usage-analytics-users.md @@ -1,6 +1,6 @@ --- -title: View analytic information about users in CloudKnox Permissions Management -description: How to view analytic information about users in CloudKnox Permissions Management. +title: View analytic information about users in Permissions Management +description: How to view analytic information about users in Permissions Management. services: active-directory author: kenwith manager: rkarlin @@ -15,10 +15,10 @@ ms.author: kenwith # View analytic information about users > [!IMPORTANT] -> CloudKnox Permissions Management (CloudKnox) is currently in PREVIEW. +> Microsoft Entra Permissions Management is currently in PREVIEW. > Some information relates to a prerelease product that may be substantially modified before it's released. Microsoft makes no warranties, express or implied, with respect to the information provided here. -The **Analytics** dashboard in CloudKnox Permissions Management (CloudKnox) collects detailed information, analyzes, reports on, and visualizes data about all identity types. 
System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: +The **Analytics** dashboard in Permissions Management collects detailed information, analyzes, reports on, and visualizes data about all identity types. System administrators can use the information to make informed decisions about granting permissions and reducing risk on unused permissions for: - **Users**: Tracks assigned permissions and usage of various identities. - **Groups**: Tracks assigned permissions and usage of the group and the group members. @@ -31,9 +31,9 @@ This article describes how to view usage analytics about users. ## Create a query to view users -When you select **Users**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. +When you select **Users**, the **Analytics** dashboard provides a high-level overview of tasks used by various identities. -1. On the main **Analytics** dashboard, select **Users** from the drop-down list at the top of the screen. +1. On the main **Analytics** dashboard, select **Users** from the drop-down list at the top of the screen. The following components make up the **Users** dashboard: @@ -43,15 +43,15 @@ When you select **Users**, the **Analytics** dashboard provides a high-level ove - **Search**: Enter criteria to find specific tasks. 1. Select **Apply** to display the criteria you've selected. - Select **Reset filter** to discard your changes. + Select **Reset filter** to discard your changes. ## View the results of your query The **Identities** table displays the results of your query. -- **Name**: Provides the name of the group. - - To view details about the group, select the down arrow. +- **Name**: Provides the name of the group. + - To view details about the group, select the down arrow. - The **Domain/Account** name. 
- The **Permission Creep Index (PCI)**: Provides the following information: - **Index**: A numeric value assigned to the PCI. @@ -79,26 +79,26 @@ The **Identities** table displays the results of your query. A message displays to confirm that your remediation settings are automatically updated. -## Apply filters to your query +## Apply filters to your query -There are many filter options within the **Users** screen, including filters by **Authorization System**, **Identity Type**, and **Identity State**. -Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. +There are many filter options within the **Users** screen, including filters by **Authorization System**, **Identity Type**, and **Identity State**. +Filters can be applied in one, two, or all three categories depending on the type of information you're looking for. ### Apply filters by authorization system type 1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset Filter** to discard your changes. + Select **Reset Filter** to discard your changes. ### Apply filters by authorization system -1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. +1. From the **Authorization System Type** dropdown, select the authorization system you want to use: **AWS**, **Azure**, or **GCP**. 1. From the **Authorization System** dropdown, select accounts from a **List** of accounts and **Folders**. 1. Select **Apply** to run your query and display the information you selected. - Select **Reset filter** to discard your changes. + Select **Reset filter** to discard your changes. 
### Apply filters by identity type @@ -152,15 +152,15 @@ You can filter user details by type of user, user role, app, or service used, or ## Export the results of your query -- To export a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. +- To export a report of the results of your query as a comma-separated values (CSV) file, select **Export**, and then select **CSV**. - To export the data in a detailed comma-separated values (CSV) file format, select **Export** and then select **CSV (Detailed)**. - To export a report of user permissions, select **Export** and then select **Permissions**. ## Next steps -- To view active tasks, see [View analytic information about active tasks](cloudknox-usage-analytics-active-tasks.md). -- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](cloudknox-usage-analytics-groups.md). -- To view active resources, see [View analytic information about active resources](cloudknox-usage-analytics-active-resources.md). -- To view the permission usage of access keys for a given user, see [View analytic information about access keys](cloudknox-usage-analytics-access-keys.md). -- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](cloudknox-usage-analytics-serverless-functions.md). +- To view active tasks, see [View analytic information about active tasks](usage-analytics-active-tasks.md). +- To view assigned permissions and usage of the group and the group members, see [View analytic information about groups](usage-analytics-groups.md). +- To view active resources, see [View analytic information about active resources](usage-analytics-active-resources.md). +- To view the permission usage of access keys for a given user, see [View analytic information about access keys](usage-analytics-access-keys.md). 
+- To view assigned permissions and usage of the serverless functions, see [View analytic information about serverless functions](usage-analytics-serverless-functions.md). \ No newline at end of file diff --git a/articles/active-directory/develop/developer-glossary.md b/articles/active-directory/develop/developer-glossary.md index ac417d44e0c9..8a670becc121 100644 --- a/articles/active-directory/develop/developer-glossary.md +++ b/articles/active-directory/develop/developer-glossary.md @@ -1,44 +1,42 @@ --- -title: Microsoft identity platform developer glossary | Azure -description: A list of terms for commonly used Microsoft identity platform developer concepts and features. +title: Glossary of terms in the Microsoft identity platform +description: Definitions of terms commonly found in Microsoft identity platform documentation, Azure portal, and authentication SDKs like the Microsoft Authentication Library (MSAL). services: active-directory author: rwike77 manager: CelesteDG ms.service: active-directory ms.subservice: develop -ms.topic: conceptual -ms.workload: identity -ms.date: 12/14/2021 +ms.topic: reference +ms.date: 05/28/2022 ms.author: ryanwi -ms.custom: aaddev -ms.reviewer: jmprieur, saeeda, jesakowi, nacanuma +ms.reviewer: mmacy --- -# Microsoft identity platform developer glossary +# Glossary: Microsoft identity platform -This article contains definitions for some of the core developer concepts and terminology, which are helpful when learning about application development using Microsoft identity platform. +You'll see these terms when you use our documentation, the Azure portal, our authentication libraries, and the Microsoft Graph API. Some terms are Microsoft-specific while others are related to protocols like OAuth or other technologies you use with the Microsoft identity platform. 
## Access token -A type of [security token](#security-token) issued by an [authorization server](#authorization-server), and used by a [client application](#client-application) in order to access a [protected resource server](#resource-server). Typically in the form of a [JSON Web Token (JWT)][JWT], the token embodies the authorization granted to the client by the [resource owner](#resource-owner), for a requested level of access. The token contains all applicable [claims](#claim) about the subject, enabling the client application to use it as a form of credential when accessing a given resource. This also eliminates the need for the resource owner to expose credentials to the client. +A type of [security token](#security-token) issued by an [authorization server](#authorization-server) and used by a [client application](#client-application) to access a [protected resource server](#resource-server). Typically in the form of a [JSON Web Token (JWT)][JWT], the token embodies the authorization granted to the client by the [resource owner](#resource-owner), for a requested level of access. The token contains all applicable [claims](#claim) about the subject, enabling the client application to use it as a form of credential when accessing a given resource. This also eliminates the need for the resource owner to expose credentials to the client. -Access tokens are only valid for a short period of time and cannot be revoked. An authorization server may also issue a [refresh token](#refresh-token) when the access token is issued. Refresh tokens are typically provided only to confidential client applications. +Access tokens are only valid for a short period of time and can't be revoked. An authorization server may also issue a [refresh token](#refresh-token) when the access token is issued. Refresh tokens are typically provided only to confidential client applications. 
Access tokens are sometimes referred to as "User+App" or "App-Only", depending on the credentials being represented. For example, when a client application uses the: -* ["Authorization code" authorization grant](#authorization-grant), the end user authenticates first as the resource owner, delegating authorization to the client to access the resource. The client authenticates afterward when obtaining the access token. The token can sometimes be referred to more specifically as a "User+App" token, as it represents both the user that authorized the client application, and the application. -* ["Client credentials" authorization grant](#authorization-grant), the client provides the sole authentication, functioning without the resource-owner's authentication/authorization, so the token can sometimes be referred to as an "App-Only" token. +- ["Authorization code" authorization grant](#authorization-grant), the end user authenticates first as the resource owner, delegating authorization to the client to access the resource. The client authenticates afterward when obtaining the access token. The token can sometimes be referred to more specifically as a "User+App" token, as it represents both the user that authorized the client application, and the application. +- ["Client credentials" authorization grant](#authorization-grant), the client provides the sole authentication, functioning without the resource-owner's authentication/authorization, so the token can sometimes be referred to as an "App-Only" token. See the [access tokens reference][AAD-Tokens-Claims] for more details. ## Actor -Another term for the [client application](#client-application) - this is the party acting on behalf of the subject, or [resource owner](#resource-owner). +Another term for the [client application](#client-application). The actor is the party acting on behalf of a subject ([resource owner](#resource-owner)). 
-## Application ID (client ID) +## Application (client) ID -The unique identifier Azure AD issues to an application registration that identifies a specific application and the associated configurations. This application ID ([client ID](https://tools.ietf.org/html/rfc6749#page-15)) is used when performing authentication requests and is provided to the authentication libraries in development time. The application ID (client ID) is not a secret. +The application ID, or _[client ID](https://datatracker.ietf.org/doc/html/rfc6749#section-2.2)_, is a value the Microsoft identity platform assigns to your application when you register it in Azure AD. The application ID is a GUID value that uniquely identifies the application and its configuration within the identity platform. You add the app ID to your application's code, and authentication libraries include the value in their requests to the identity platform at application runtime. The application (client) ID isn't a secret - don't use it as a password or other credential. ## Application manifest @@ -46,62 +44,62 @@ A feature provided by the [Azure portal][AZURE-portal], which produces a JSON re ## Application object -When you register/update an application in the [Azure portal][AZURE-portal], the portal creates/updates both an application object and a corresponding [service principal object](#service-principal-object) for that tenant. The application object *defines* the application's identity configuration globally (across all tenants where it has access), providing a template from which its corresponding service principal object(s) are *derived* for use locally at run-time (in a specific tenant). +When you register/update an application in the [Azure portal][AZURE-portal], the portal creates/updates both an application object and a corresponding [service principal object](#service-principal-object) for that tenant. 
The application object _defines_ the application's identity configuration globally (across all tenants where it has access), providing a template from which its corresponding service principal object(s) are _derived_ for use locally at run-time (in a specific tenant). For more information, see [Application and Service Principal Objects][AAD-App-SP-Objects]. ## Application registration -In order to allow an application to integrate with and delegate Identity and Access Management functions to Azure AD, it must be registered with an Azure AD [tenant](#tenant). When you register your application with Azure AD, you are providing an identity configuration for your application, allowing it to integrate with Azure AD and use features such as: +In order to allow an application to integrate with and delegate Identity and Access Management functions to Azure AD, it must be registered with an Azure AD [tenant](#tenant). When you register your application with Azure AD, you're providing an identity configuration for your application, allowing it to integrate with Azure AD and use features like: -* Robust management of Single Sign-On using Azure AD Identity Management and [OpenID Connect][OpenIDConnect] protocol implementation -* Brokered access to [protected resources](#resource-server) by [client applications](#client-application), via OAuth 2.0 [authorization server](#authorization-server) -* [Consent framework](#consent) for managing client access to protected resources, based on resource owner authorization. +- Robust management of Single Sign-On using Azure AD Identity Management and [OpenID Connect][OpenIDConnect] protocol implementation +- Brokered access to [protected resources](#resource-server) by [client applications](#client-application), via OAuth 2.0 [authorization server](#authorization-server) +- [Consent framework](#consent) for managing client access to protected resources, based on resource owner authorization. 
See [Integrating applications with Azure Active Directory][AAD-Integrating-Apps] for more details. ## Authentication -The act of challenging a party for legitimate credentials, providing the basis for creation of a security principal to be used for identity and access control. During an [OAuth2 authorization grant](#authorization-grant) for example, the party authenticating is filling the role of either [resource owner](#resource-owner) or [client application](#client-application), depending on the grant used. +The act of challenging a party for legitimate credentials, providing the basis for creation of a security principal to be used for identity and access control. During an [OAuth 2.0 authorization grant](#authorization-grant) for example, the party authenticating is filling the role of either [resource owner](#resource-owner) or [client application](#client-application), depending on the grant used. ## Authorization The act of granting an authenticated security principal permission to do something. There are two primary use cases in the Azure AD programming model: -* During an [OAuth2 authorization grant](#authorization-grant) flow: when the [resource owner](#resource-owner) grants authorization to the [client application](#client-application), allowing the client to access the resource owner's resources. -* During resource access by the client: as implemented by the [resource server](#resource-server), using the [claim](#claim) values present in the [access token](#access-token) to make access control decisions based upon them. +- During an [OAuth 2.0 authorization grant](#authorization-grant) flow: when the [resource owner](#resource-owner) grants authorization to the [client application](#client-application), allowing the client to access the resource owner's resources. 
+- During resource access by the client: as implemented by the [resource server](#resource-server), using the [claim](#claim) values present in the [access token](#access-token) to make access control decisions based upon them. ## Authorization code -A short lived "token" provided to a [client application](#client-application) by the [authorization endpoint](#authorization-endpoint), as part of the "authorization code" flow, one of the four OAuth2 [authorization grants](#authorization-grant). The code is returned to the client application in response to authentication of a [resource owner](#resource-owner), indicating the resource owner has delegated authorization to access the requested resources. As part of the flow, the code is later redeemed for an [access token](#access-token). +A short-lived value provided by the [authorization endpoint](#authorization-endpoint) to a [client application](#client-application) during the OAuth 2.0 _authorization code grant flow_, one of the four OAuth 2.0 [authorization grants](#authorization-grant). Also called an _auth code_, the authorization code is returned to the client application in response to the authentication of a [resource owner](#resource-owner). The auth code indicates the resource owner has delegated authorization to the client application to access their resources. As part of the flow, the auth code is later redeemed for an [access token](#access-token). ## Authorization endpoint -One of the endpoints implemented by the [authorization server](#authorization-server), used to interact with the [resource owner](#resource-owner) in order to provide an [authorization grant](#authorization-grant) during an OAuth2 authorization grant flow. Depending on the authorization grant flow used, the actual grant provided can vary, including an [authorization code](#authorization-code) or [security token](#security-token). 
+One of the endpoints implemented by the [authorization server](#authorization-server), used to interact with the [resource owner](#resource-owner) to provide an [authorization grant](#authorization-grant) during an OAuth 2.0 authorization grant flow. Depending on the authorization grant flow used, the actual grant provided can vary, including an [authorization code](#authorization-code) or [security token](#security-token). -See the OAuth2 specification's [authorization grant types][OAuth2-AuthZ-Grant-Types] and [authorization endpoint][OAuth2-AuthZ-Endpoint] sections, and the [OpenIDConnect specification][OpenIDConnect-AuthZ-Endpoint] for more details. +See the OAuth 2.0 specification's [authorization grant types][OAuth2-AuthZ-Grant-Types] and [authorization endpoint][OAuth2-AuthZ-Endpoint] sections, and the [OpenIDConnect specification][OpenIDConnect-AuthZ-Endpoint] for more details. ## Authorization grant -A credential representing the [resource owner's](#resource-owner) [authorization](#authorization) to access its protected resources, granted to a [client application](#client-application). A client application can use one of the [four grant types defined by the OAuth2 Authorization Framework][OAuth2-AuthZ-Grant-Types] to obtain a grant, depending on client type/requirements: "authorization code grant", "client credentials grant", "implicit grant", and "resource owner password credentials grant". The credential returned to the client is either an [access token](#access-token), or an [authorization code](#authorization-code) (exchanged later for an access token), depending on the type of authorization grant used. +A credential representing the [resource owner's](#resource-owner) [authorization](#authorization) to access its protected resources, granted to a [client application](#client-application). 
A client application can use one of the [four grant types defined by the OAuth 2.0 Authorization Framework][OAuth2-AuthZ-Grant-Types] to obtain a grant, depending on client type/requirements: "authorization code grant", "client credentials grant", "implicit grant", and "resource owner password credentials grant". The credential returned to the client is either an [access token](#access-token), or an [authorization code](#authorization-code) (exchanged later for an access token), depending on the type of authorization grant used. ## Authorization server -As defined by the [OAuth2 Authorization Framework][OAuth2-Role-Def], the server responsible for issuing access tokens to the [client](#client-application) after successfully authenticating the [resource owner](#resource-owner) and obtaining its authorization. A [client application](#client-application) interacts with the authorization server at runtime via its [authorization](#authorization-endpoint) and [token](#token-endpoint) endpoints, in accordance with the OAuth2 defined [authorization grants](#authorization-grant). +As defined by the [OAuth 2.0 Authorization Framework][OAuth2-Role-Def], the server responsible for issuing access tokens to the [client](#client-application) after successfully authenticating the [resource owner](#resource-owner) and obtaining its authorization. A [client application](#client-application) interacts with the authorization server at runtime via its [authorization](#authorization-endpoint) and [token](#token-endpoint) endpoints, in accordance with the OAuth 2.0 defined [authorization grants](#authorization-grant). In the case of the Microsoft identity platform application integration, the Microsoft identity platform implements the authorization server role for Azure AD applications and Microsoft service APIs, for example [Microsoft Graph APIs][Microsoft-Graph]. 
## Claim -A [security token](#security-token) contains claims, which provide assertions about one entity (such as a [client application](#client-application) or [resource owner](#resource-owner)) to another entity (such as the [resource server](#resource-server)). Claims are name/value pairs that relay facts about the token subject (for example, the security principal that was authenticated by the [authorization server](#authorization-server)). The claims present in a given token are dependent upon several variables, including the type of token, the type of credential used to authenticate the subject, the application configuration, etc. +Claims are name/value pairs in a [security token](#security-token) that provide assertions made by one entity to another. These entities are typically the [client application](#client-application) or a [resource owner](#resource-owner) providing assertions to a [resource server](#resource-server). Claims relay facts about the token subject like the ID of the security principal that was authenticated by the [authorization server](#authorization-server). The claims present in a token can vary and depend on several factors like the type of token, type of credential used for authenticating the subject, the application configuration, and others. See the [Microsoft identity platform token reference][AAD-Tokens-Claims] for more details. ## Client application -Also known as the "[actor](#actor)". As defined by the [OAuth2 Authorization Framework][OAuth2-Role-Def], an application that makes protected resource requests on behalf of the [resource owner](#resource-owner). They receive permissions from the resource owner in the form of scopes. The term "client" does not imply any particular hardware implementation characteristics (for instance, whether the application executes on a server, a desktop, or other devices). +Also known as the "[actor](#actor)". 
As defined by the [OAuth 2.0 Authorization Framework][OAuth2-Role-Def], an application that makes protected resource requests on behalf of the [resource owner](#resource-owner). They receive permissions from the resource owner in the form of scopes. The term "client" doesn't imply any particular hardware implementation characteristics (for instance, whether the application executes on a server, a desktop, or other devices). -A client application requests [authorization](#authorization) from a resource owner to participate in an [OAuth2 authorization grant](#authorization-grant) flow, and may access APIs/data on the resource owner's behalf. The OAuth2 Authorization Framework [defines two types of clients][OAuth2-Client-Types], "confidential" and "public", based on the client's ability to maintain the confidentiality of its credentials. Applications can implement a [web client (confidential)](#web-client) which runs on a web server, a [native client (public)](#native-client) installed on a device, or a [user-agent-based client (public)](#user-agent-based-client) which runs in a device's browser. +A client application requests [authorization](#authorization) from a resource owner to participate in an [OAuth 2.0 authorization grant](#authorization-grant) flow, and may access APIs/data on the resource owner's behalf. The OAuth 2.0 Authorization Framework [defines two types of clients][OAuth2-Client-Types], "confidential" and "public", based on the client's ability to maintain the confidentiality of its credentials. Applications can implement a [web client (confidential)](#web-client) which runs on a web server, a [native client (public)](#native-client) installed on a device, or a [user-agent-based client (public)](#user-agent-based-client) which runs in a device's browser. ## Consent @@ -111,7 +109,7 @@ See [consent framework](consent-framework.md) for more information. 
## ID token -An [OpenID Connect][OpenIDConnect-ID-Token] [security token](#security-token) provided by an [authorization server's](#authorization-server) [authorization endpoint](#authorization-endpoint), which contains [claims](#claim) pertaining to the authentication of an end user [resource owner](#resource-owner). Like an access token, ID tokens are also represented as a digitally signed [JSON Web Token (JWT)][JWT]. Unlike an access token though, an ID token's claims are not used for purposes related to resource access and specifically access control. +An [OpenID Connect][OpenIDConnect-ID-Token] [security token](#security-token) provided by an [authorization server's](#authorization-server) [authorization endpoint](#authorization-endpoint), which contains [claims](#claim) pertaining to the authentication of an end user [resource owner](#resource-owner). Like an access token, ID tokens are also represented as a digitally signed [JSON Web Token (JWT)][JWT]. Unlike an access token though, an ID token's claims aren't used for purposes related to resource access and specifically access control. See the [ID token reference](id-tokens.md) for more details. @@ -121,7 +119,7 @@ Eliminate the need for developers to manage credentials. Managed identities prov ## Microsoft identity platform -The Microsoft identity platform is an evolution of the Azure Active Directory (Azure AD) identity service and developer platform. It allows developers to build applications that sign in all Microsoft identities, get tokens to call Microsoft Graph, other Microsoft APIs, or APIs that developers have built. It’s a full-featured platform that consists of an authentication service, libraries, application registration and configuration, full developer documentation, code samples, and other developer content. The Microsoft identity platform supports industry standard protocols such as OAuth 2.0 and OpenID Connect. 
+The Microsoft identity platform is an evolution of the Azure Active Directory (Azure AD) identity service and developer platform. It allows developers to build applications that sign in all Microsoft identities, get tokens to call Microsoft Graph, other Microsoft APIs, or APIs that developers have built. It's a full-featured platform that consists of an authentication service, libraries, application registration and configuration, full developer documentation, code samples, and other developer content. The Microsoft identity platform supports industry standard protocols such as OAuth 2.0 and OpenID Connect. ## Multi-tenant application @@ -131,14 +129,14 @@ See [How to sign in any Azure AD user using the multi-tenant application pattern ## Native client -A type of [client application](#client-application) that is installed natively on a device. Since all code is executed on a device, it is considered a "public" client due to its inability to store credentials privately/confidentially. See [OAuth2 client types and profiles][OAuth2-Client-Types] for more details. +A type of [client application](#client-application) that is installed natively on a device. Since all code is executed on a device, it's considered a "public" client due to its inability to store credentials privately/confidentially. See [OAuth 2.0 client types and profiles][OAuth2-Client-Types] for more details. ## Permissions A [client application](#client-application) gains access to a [resource server](#resource-server) by declaring permission requests. Two types are available: -* "Delegated" permissions, which specify [scope-based](#scopes) access using delegated authorization from the signed-in [resource owner](#resource-owner), are presented to the resource at run-time as ["scp" claims](#claim) in the client's [access token](#access-token). These indicate the permission granted to the [actor](#actor) by the [subject](#subject). 
-* "Application" permissions, which specify [role-based](#roles) access using the client application's credentials/identity, are presented to the resource at run-time as ["roles" claims](#claim) in the client's access token. These indicate permissions granted to the [subject](#subject) by the tenant. +- "Delegated" permissions, which specify [scope-based](#scopes) access using delegated authorization from the signed-in [resource owner](#resource-owner), are presented to the resource at run-time as ["scp" claims](#claim) in the client's [access token](#access-token). These indicate the permission granted to the [actor](#actor) by the [subject](#subject). +- "Application" permissions, which specify [role-based](#roles) access using the client application's credentials/identity, are presented to the resource at run-time as ["roles" claims](#claim) in the client's access token. These indicate permissions granted to the [subject](#subject) by the tenant. They also surface during the [consent](#consent) process, giving the administrator or resource owner the opportunity to grant/deny the client access to resources in their tenant. @@ -146,21 +144,21 @@ Permission requests are configured on the **API permissions** page for an applic ## Refresh token -A type of [security token](#security-token) issued by an [authorization server](#authorization-server), and used by a [client application](#client-application) in order to request a new [access token](#access-token) before the access token expires. Typically in the form of a [JSON Web Token (JWT)][JWT]. +A type of [security token](#security-token) issued by an [authorization server](#authorization-server). Before an access token expires, a [client application](#client-application) includes its associated refresh token when it requests a new [access token](#access-token) from the authorization server. Refresh tokens are typically formatted as a [JSON Web Token (JWT)][JWT]. -Unlike access tokens, refresh tokens can be revoked. 
If a client application attempts to request a new access token using a refresh token that has been revoked, the authorization server will deny the request, and the client application will no longer have permission to access the [resource server](#resource-server) on behalf of the [resource owner](#resource-owner). +Unlike access tokens, refresh tokens can be revoked. An authorization server denies any request from a client application that includes a refresh token that has been revoked. When the authorization server denies a request that includes a revoked refresh token, the client application loses the permission to access the [resource server](#resource-server) on behalf of the [resource owner](#resource-owner). See the [refresh tokens](refresh-tokens.md) for more details. ## Resource owner -As defined by the [OAuth2 Authorization Framework][OAuth2-Role-Def], an entity capable of granting access to a protected resource. When the resource owner is a person, it is referred to as an end user. For example, when a [client application](#client-application) wants to access a user's mailbox through the [Microsoft Graph API][Microsoft-Graph], it requires permission from the resource owner of the mailbox. The "resource owner" is also sometimes called the [subject](#subject). +As defined by the [OAuth 2.0 Authorization Framework][OAuth2-Role-Def], an entity capable of granting access to a protected resource. When the resource owner is a person, it's referred to as an end user. For example, when a [client application](#client-application) wants to access a user's mailbox through the [Microsoft Graph API][Microsoft-Graph], it requires permission from the resource owner of the mailbox. The "resource owner" is also sometimes called the [subject](#subject). -Every [security token](#security-token) represents a resource owner. The resource owner is what the subject [claim](#claim), object ID claim, and personal data in the token represent. 
Resource owners are the party that grants delegated permissions to a client application, in the form of scopes. Resource owners are also the recipients of [roles](#roles) that indicate expanded permissions within a tenant or on an application. +Every [security token](#security-token) represents a resource owner. The resource owner is what the subject [claim](#claim), object ID claim, and personal data in the token represent. Resource owners are the party that grants delegated permissions to a client application, in the form of scopes. Resource owners are also the recipients of [roles](#roles) that indicate expanded permissions within a tenant or on an application. ## Resource server -As defined by the [OAuth2 Authorization Framework][OAuth2-Role-Def], a server that hosts protected resources, capable of accepting and responding to protected resource requests by [client applications](#client-application) that present an [access token](#access-token). Also known as a protected resource server, or resource application. +As defined by the [OAuth 2.0 Authorization Framework][OAuth2-Role-Def], a server that hosts protected resources, capable of accepting and responding to protected resource requests by [client applications](#client-application) that present an [access token](#access-token). Also known as a protected resource server, or resource application. A resource server exposes APIs and enforces access to its protected resources through [scopes](#scopes) and [roles](#roles), using the OAuth 2.0 Authorization Framework. Examples include the [Microsoft Graph API][Microsoft-Graph], which provides access to Azure AD tenant data, and the Microsoft 365 APIs that provide access to data such as mail and calendar. @@ -168,9 +166,9 @@ Just like a client application, resource application's identity configuration is ## Roles -Like [scopes](#scopes), app roles provide a way for a [resource server](#resource-server) to govern access to its protected resources. 
Unlike scopes, roles represent privileges that the [subject](#subject) has been granted beyond the baseline - this is why reading your own email is a scope, while being an email administrator that can read everyone's email is a role. +Like [scopes](#scopes), app roles provide a way for a [resource server](#resource-server) to govern access to its protected resources. Unlike scopes, roles represent privileges that the [subject](#subject) has been granted beyond the baseline - this is why reading your own email is a scope, while being an email administrator that can read everyone's email is a role. -App roles can support two assignment types: "user" assignment implements role-based access control for users/groups that require access to the resource, while "application" assignment implements the same for [client applications](#client-application) that require access. An app role can be defined as user-assignable, app-assignabnle, or both. +App roles can support two assignment types: "user" assignment implements role-based access control for users/groups that require access to the resource, while "application" assignment implements the same for [client applications](#client-application) that require access. An app role can be defined as user-assignable, app-assignable, or both. Roles are resource-defined strings (for example "Expense approver", "Read-only", "Directory.ReadWrite.All"), managed in the [Azure portal][AZURE-portal] via the resource's [application manifest](#application-manifest), and stored in the resource's [appRoles property][Graph-Sp-Resource]. The Azure portal is also used to assign users to "user" assignable roles, and configure client [application permissions](#permissions) to request "application" assignable roles. @@ -186,17 +184,17 @@ A best practice naming convention, is to use a "resource.operation.constraint" f ## Security token -A signed document containing claims, such as an OAuth2 token or SAML 2.0 assertion. 
For an OAuth2 [authorization grant](#authorization-grant), an [access token](#access-token) (OAuth2), [refresh token](#refresh-token), and an [ID Token](https://openid.net/specs/openid-connect-core-1_0.html#IDToken) are types of security tokens, all of which are implemented as a [JSON Web Token (JWT)][JWT]. +A signed document containing claims, such as an OAuth 2.0 token or SAML 2.0 assertion. For an OAuth 2.0 [authorization grant](#authorization-grant), an [access token](#access-token) (OAuth 2.0), [refresh token](#refresh-token), and an [ID Token](https://openid.net/specs/openid-connect-core-1_0.html#IDToken) are types of security tokens, all of which are implemented as a [JSON Web Token (JWT)][JWT]. ## Service principal object -When you register/update an application in the [Azure portal][AZURE-portal], the portal creates/updates both an [application object](#application-object) and a corresponding service principal object for that tenant. The application object *defines* the application's identity configuration globally (across all tenants where the associated application has been granted access), and is the template from which its corresponding service principal object(s) are *derived* for use locally at run-time (in a specific tenant). +When you register/update an application in the [Azure portal][AZURE-portal], the portal creates/updates both an [application object](#application-object) and a corresponding service principal object for that tenant. The application object _defines_ the application's identity configuration globally (across all tenants where the associated application has been granted access), and is the template from which its corresponding service principal object(s) are _derived_ for use locally at run-time (in a specific tenant). For more information, see [Application and Service Principal Objects][AAD-App-SP-Objects]. 
## Sign-in -The process of a [client application](#client-application) initiating end-user authentication and capturing related state, for the purpose of acquiring a [security token](#security-token) and scoping the application session to that state. State can include artifacts such as user profile information, and information derived from token claims. +The process of a [client application](#client-application) initiating end-user authentication and capturing related state for requesting a [security token](#security-token) and scoping the application session to that state. State can include artifacts like user profile information, and information derived from token claims. The sign-in function of an application is typically used to implement single-sign-on (SSO). It may also be preceded by a "sign-up" function, as the entry point for an end user to gain access to an application (upon first sign-in). The sign-up function is used to gather and persist additional state specific to the user, and may require [user consent](#consent). @@ -206,37 +204,37 @@ The process of unauthenticating an end user, detaching the user state associated ## Subject -Also known as the [resource owner](#resource-owner). +Also known as the [resource owner](#resource-owner). ## Tenant An instance of an Azure AD directory is referred to as an Azure AD tenant. It provides several features, including: -* a registry service for integrated applications -* authentication of user accounts and registered applications -* REST endpoints required to support various protocols including OAuth2 and SAML, including the [authorization endpoint](#authorization-endpoint), [token endpoint](#token-endpoint) and the "common" endpoint used by [multi-tenant applications](#multi-tenant-application). 
+- a registry service for integrated applications +- authentication of user accounts and registered applications +- REST endpoints required to support various protocols including OAuth 2.0 and SAML, including the [authorization endpoint](#authorization-endpoint), [token endpoint](#token-endpoint) and the "common" endpoint used by [multi-tenant applications](#multi-tenant-application). Azure AD tenants are created/associated with Azure and Microsoft 365 subscriptions during sign-up, providing Identity & Access Management features for the subscription. Azure subscription administrators can also create additional Azure AD tenants via the Azure portal. See [How to get an Azure Active Directory tenant][AAD-How-To-Tenant] for details on the various ways you can get access to a tenant. See [Associate or add an Azure subscription to your Azure Active Directory tenant][AAD-How-Subscriptions-Assoc] for details on the relationship between subscriptions and an Azure AD tenant, and for instructions on how to associate or add a subscription to an Azure AD tenant. ## Token endpoint -One of the endpoints implemented by the [authorization server](#authorization-server) to support OAuth2 [authorization grants](#authorization-grant). Depending on the grant, it can be used to acquire an [access token](#access-token) (and related "refresh" token) to a [client](#client-application), or [ID token](#id-token) when used with the [OpenID Connect][OpenIDConnect] protocol. +One of the endpoints implemented by the [authorization server](#authorization-server) to support OAuth 2.0 [authorization grants](#authorization-grant). Depending on the grant, it can be used to acquire an [access token](#access-token) (and related "refresh" token) to a [client](#client-application), or [ID token](#id-token) when used with the [OpenID Connect][OpenIDConnect] protocol. 
## User-agent-based client -A type of [client application](#client-application) that downloads code from a web server and executes within a user-agent (for instance, a web browser), such as a single-page application (SPA). Since all code is executed on a device, it is considered a "public" client due to its inability to store credentials privately/confidentially. For more information, see [OAuth2 client types and profiles][OAuth2-Client-Types]. +A type of [client application](#client-application) that downloads code from a web server and executes within a user-agent (for instance, a web browser), such as a single-page application (SPA). Since all code is executed on a device, it is considered a "public" client due to its inability to store credentials privately/confidentially. For more information, see [OAuth 2.0 client types and profiles][OAuth2-Client-Types]. ## User principal -Similar to the way a service principal object is used to represent an application instance, a user principal object is another type of security principal, which represents a user. The Microsoft Graph [User resource type][Graph-User-Resource] defines the schema for a user object, including user-related properties such as first and last name, user principal name, directory role membership, etc. This provides the user identity configuration for Azure AD to establish a user principal at run-time. The user principal is used to represent an authenticated user for Single Sign-On, recording [consent](#consent) delegation, making access control decisions, etc. +Similar to the way a service principal object is used to represent an application instance, a user principal object is another type of security principal, which represents a user. The Microsoft Graph [User resource type][Graph-User-Resource] defines the schema for a user object, including user-related properties like first and last name, user principal name, directory role membership, etc. 
This provides the user identity configuration for Azure AD to establish a user principal at run-time. The user principal is used to represent an authenticated user for Single Sign-On, recording [consent](#consent) delegation, making access control decisions, etc. ## Web client -A type of [client application](#client-application) that executes all code on a web server, and able to function as a "confidential" client by securely storing its credentials on the server. For more information, see [OAuth2 client types and profiles][OAuth2-Client-Types]. +A type of [client application](#client-application) that executes all code on a web server, functioning as a _confidential client_ because it can securely store its credentials on the server. For more information, see [OAuth 2.0 client types and profiles][OAuth2-Client-Types]. ## Workload identity -An identity used by a software workload (such as an application, service, script, or container) to authenticate and access other services and resources. In Azure AD, workload identities are apps, service principals, and managed identities. For more information, see [workload identity overview](workload-identities-overview.md). +An identity used by a software workload like an application, service, script, or container to authenticate and access other services and resources. In Azure AD, workload identities are apps, service principals, and managed identities. For more information, see [workload identity overview](workload-identities-overview.md). 
## Workload identity federation @@ -244,9 +242,9 @@ Allows you to securely access Azure AD protected resources from external apps an ## Next steps -The [Microsoft identity platform Developer's Guide][AAD-Dev-Guide] is the landing page to use for all the Microsoft identity platform development-related topics, including an overview of [application integration][AAD-How-To-Integrate] and the basics of the [Microsoft identity platform authentication and supported authentication scenarios][AAD-Auth-Scenarios]. You can also find code samples & tutorials on how to get up and running quickly on [GitHub](https://github.com/azure-samples?utf8=%E2%9C%93&q=active%20directory&type=&language=). +Many of the terms in this glossary are related to the OAuth 2.0 and OpenID Connect protocols. Though you don't need to know how the protocols work "on the wire" to use the identity platform, knowing some protocol basics can help you more easily build and debug authentication and authorization in your apps: -Use the following comments section to provide feedback and help to refine and shape this content, including requests for new definitions or updating existing ones! 
+- [OAuth 2.0 and OpenID Connect (OIDC) in the Microsoft identity platform](active-directory-v2-protocols.md) @@ -278,4 +276,4 @@ Use the following comments section to provide feedback and help to refine and sh [OAuth2-Role-Def]: https://tools.ietf.org/html/rfc6749#page-6 [OpenIDConnect]: https://openid.net/specs/openid-connect-core-1_0.html [OpenIDConnect-AuthZ-Endpoint]: https://openid.net/specs/openid-connect-core-1_0.html#AuthorizationEndpoint -[OpenIDConnect-ID-Token]: https://openid.net/specs/openid-connect-core-1_0.html#IDToken \ No newline at end of file +[OpenIDConnect-ID-Token]: https://openid.net/specs/openid-connect-core-1_0.html#IDToken diff --git a/articles/active-directory/develop/includes/web-app/quickstart-nodejs-msal.md b/articles/active-directory/develop/includes/web-app/quickstart-nodejs-msal.md index 6ccdb4ffd983..ed2c683cefa7 100644 --- a/articles/active-directory/develop/includes/web-app/quickstart-nodejs-msal.md +++ b/articles/active-directory/develop/includes/web-app/quickstart-nodejs-msal.md @@ -16,7 +16,7 @@ ms.custom: aaddev, scenarios:getting-started, languages:js, devx-track-js # Customer intent: As an application developer, I want to know how to set up authentication in a web application built using Node.js and MSAL Node. --- -In this quickstart, you download and run a code sample that demonstrates how a Node.js web app can sign in users by using the authorization code flow. The code sample also demonstrates how to get an access token to call Microsoft Graph API. +In this quickstart, you download and run a code sample that demonstrates how a Node.js web app can sign in users by using the authorization code flow. The code sample also demonstrates how to get an access token to call the Microsoft Graph API. See [How the sample works](#how-the-sample-works) for an illustration. @@ -37,8 +37,8 @@ This quickstart uses the Microsoft Authentication Library for Node.js (MSAL Node 1. 
If you have access to multiple tenants, use the **Directories + subscriptions** filter :::image type="icon" source="../../media/common/portal-directory-subscription-filter.png" border="false"::: in the top menu to switch to the tenant in which you want to register the application. 1. Under **Manage**, select **App registrations** > **New registration**. 1. Enter a **Name** for your application. Users of your app might see this name, and you can change it later. -1. Under **Supported account types**, select **Accounts in any organizational directory and personal Microsoft accounts**. -1. Set the **Redirect URI** value to `http://localhost:3000/redirect`. +1. Under **Supported account types**, select **Accounts in this organizational directory only**. +1. Set the **Redirect URI** value to `http://localhost:3000/auth/redirect`. 1. Select **Register**. 1. On the app **Overview** page, note the **Application (client) ID** value for later use. 1. Under **Manage**, select **Certificates & secrets** > **Client secrets** > **New client secret**. Leave the description blank and default expiration, and then select **Add**. @@ -51,45 +51,34 @@ To run the project with a web server by using Node.js, [download the core projec #### Step 3: Configure your Node app -Extract the project, open the *ms-identity-node-main* folder, and then open the *index.js* file. - -Set the `clientID` value with the application (client) ID, and then set the `clientSecret` value with the client secret. 
- -```javascript -const config = { - auth: { - clientId: "Enter_the_Application_Id_Here", - authority: "https://login.microsoftonline.com/common", - clientSecret: "Enter_the_Client_Secret_Here" - }, -    system: { -        loggerOptions: { -            loggerCallback(loglevel, message, containsPii) { -                console.log(message); -            }, -         piiLoggingEnabled: false, -         logLevel: msal.LogLevel.Verbose, -        } -    } -}; -``` - +Extract the project, open the *ms-identity-node-main* folder, and then open the *.env* file under the *App* folder. Replace the values above as follows: -Modify the values in the `config` section: +| Variable | Description | Example(s) | +|-----------|--------------|------------| +| `Enter_the_Cloud_Instance_Id_Here` | The Azure cloud instance in which your application is registered | `https://login.microsoftonline.com/` (include the trailing forward-slash) | +| `Enter_the_Tenant_Info_here` | Tenant ID or Primary domain | `contoso.microsoft.com` or `cbe899ec-5f5c-4efe-b7a0-599505d3d54f` | +| `Enter_the_Application_Id_Here` | Client ID of the application you registered | `cbe899ec-5f5c-4efe-b7a0-599505d3d54f` | +| `Enter_the_Client_Secret_Here` | Client secret of the application you registered | `WxvhStRfDXoEiZQj1qCy` | +| `Enter_the_Graph_Endpoint_Here` | The Microsoft Graph API cloud instance that your app will call | `https://graph.microsoft.com/` (include the trailing forward-slash) | +| `Enter_the_Express_Session_Secret_Here` | A random string of characters used to sign the Express session cookie | `WxvhStRfDXoEiZQj1qCy` | -- `Enter_the_Application_Id_Here` is the application (client) ID for the application you registered. +Your file should look similar to below: - To find the application (client) ID, go to the app registration's **Overview** page in the Azure portal. -- `Enter_the_Client_Secret_Here` is the client secret for the application you registered. 
+```text +CLOUD_INSTANCE=https://login.microsoftonline.com/ +TENANT_ID=cbe899ec-5f5c-4efe-b7a0-599505d3d54f +CLIENT_ID=fa29b4c9-7675-4b61-8a0a-bf7b2b4fda91 +CLIENT_SECRET=WxvhStRfDXoEiZQj1qCy - To retrieve or generate a new client secret, under **Manage**, select **Certificates & secrets**. +REDIRECT_URI=http://localhost:3000/auth/redirect +POST_LOGOUT_REDIRECT_URI=http://localhost:3000 -The default `authority` value represents the main (global) Azure cloud: +GRAPH_API_ENDPOINT=https://graph.microsoft.com/ -```javascript -authority: "https://login.microsoftonline.com/common", +EXPRESS_SESSION_SECRET=6DP6v09eLiW7f1E65B8k ``` + #### Step 4: Run the project Run the project by using Node.js. @@ -97,21 +86,22 @@ Run the project by using Node.js. 1. To start the server, run the following commands from within the project directory: ```console + cd App npm install npm start ``` 1. Go to `http://localhost:3000/`. -1. Select **Sign In** to start the sign-in process. +1. Select **Sign in** to start the sign-in process. - The first time you sign in, you're prompted to provide your consent to allow the application to access your profile and sign you in. After you're signed in successfully, you will see a log message in the command line. + The first time you sign in, you're prompted to provide your consent to allow the application to sign you in and access your profile. After you're signed in successfully, you'll be redirected back to the application home page. ## More information ### How the sample works -The sample hosts a web server on localhost, port 3000. When a web browser accesses this site, the sample immediately redirects the user to a Microsoft authentication page. Because of this, the sample does not contain any HTML or display elements. Authentication success displays the message "OK". +The sample hosts a web server on localhost, port 3000. When a web browser accesses this address, the app renders the home page. 
Once the user selects **Sign in**, the app redirects the browser to the Azure AD sign-in screen, via the URL generated by the MSAL Node library. After the user consents, the browser redirects the user back to the application home page, along with an ID and access token. ### MSAL Node @@ -123,5 +113,6 @@ npm install @azure/msal-node ``` ## Next steps +Learn more about the web app scenario that the Microsoft identity platform supports: > [!div class="nextstepaction"] -> [Adding Auth to an existing web app - GitHub code sample >](https://github.com/AzureAD/microsoft-authentication-library-for-js/tree/dev/samples/msal-node-samples/auth-code) +> [Web app that signs in users scenario](../../scenario-web-app-sign-user-overview.md) diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/graph-call-screen.png b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/graph-call-screen.png new file mode 100644 index 000000000000..366d9f849001 Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/graph-call-screen.png differ diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/id-token-screen.png b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/id-token-screen.png new file mode 100644 index 000000000000..7b101c7d24bf Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/id-token-screen.png differ diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/post-sign-in-screen.png b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/post-sign-in-screen.png new file mode 100644 index 000000000000..278d3c2a6b8d Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/post-sign-in-screen.png differ diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/sign-out-screen.png 
b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/sign-out-screen.png new file mode 100644 index 000000000000..fc47e78a9ab3 Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/sign-out-screen.png differ diff --git a/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/welcome-screen.png b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/welcome-screen.png new file mode 100644 index 000000000000..f7a25c5d8aa1 Binary files /dev/null and b/articles/active-directory/develop/media/tutorial-v2-nodejs-webapp-msal/welcome-screen.png differ diff --git a/articles/active-directory/develop/mobile-app-quickstart-portal-android.md b/articles/active-directory/develop/mobile-app-quickstart-portal-android.md index edab8ca0d69b..0651ad9f6677 100644 --- a/articles/active-directory/develop/mobile-app-quickstart-portal-android.md +++ b/articles/active-directory/develop/mobile-app-quickstart-portal-android.md @@ -24,7 +24,7 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > > We apologize for the inconvenience and appreciate your patience while we work to get this resolved. -> [!div renderon="portal" class="sxs-lookup display-on-portal"] +> [!div renderon="portal" id="display-on-portal" class="sxs-lookup"] > # Quickstart: Sign in users and call the Microsoft Graph API from an Android app > > In this quickstart, you download and run a code sample that demonstrates how an Android application can sign in users and get an access token to call the Microsoft Graph API. @@ -42,7 +42,7 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > ### Step 1: Configure your application in the Azure portal > For the code sample in this quickstart to work, add a **Redirect URI** compatible with the Auth broker. 
> -> +> > > > [!div id="appconfigured" class="alert alert-info"] > > ![Already configured](media/quickstart-v2-android/green-check.png) Your application is configured with these attributes @@ -50,7 +50,9 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > ### Step 2: Download the project > > Run the project using Android Studio. -> +> +> > [!div class="nextstepaction"] +> > > > > ### Step 3: Your app is configured and ready to run @@ -484,4 +486,4 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > Move on to the Android tutorial in which you build an Android app that gets an access token from the Microsoft identity platform and uses it to call the Microsoft Graph API. > > > [!div class="nextstepaction"] -> > [Tutorial: Sign in users and call the Microsoft Graph from an Android application](tutorial-v2-android.md) \ No newline at end of file +> > [Tutorial: Sign in users and call the Microsoft Graph from an Android application](tutorial-v2-android.md) diff --git a/articles/active-directory/develop/mobile-app-quickstart-portal-ios.md b/articles/active-directory/develop/mobile-app-quickstart-portal-ios.md index 87e9bc2f40c5..5158c3110668 100644 --- a/articles/active-directory/develop/mobile-app-quickstart-portal-ios.md +++ b/articles/active-directory/develop/mobile-app-quickstart-portal-ios.md @@ -26,7 +26,7 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > > We apologize for the inconvenience and appreciate your patience while we work to get this resolved. -> [!div renderon="portal" class="sxs-lookup display-on-portal"] +> [!div renderon="portal" id="display-on-portal" class="sxs-lookup"] > # Quickstart: Sign in users and call the Microsoft Graph API from an iOS or macOS app > > In this quickstart, you download and run a code sample that demonstrates how a native iOS or macOS application can sign in users and get an access token to call the Microsoft Graph API. 
@@ -47,16 +47,18 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > #### Step 1: Configure your application > For the code sample in this quickstart to work, add a **Redirect URI** compatible with the Auth broker. > -> +> > > > [!div id="appconfigured" class="alert alert-info"] > > ![Already configured](media/quickstart-v2-ios/green-check.png) Your application is configured with these attributes > > #### Step 2: Download the sample project > -> -> -> +> > [!div class="nextstepaction"] +> > +> +> > [!div class="nextstepaction"] +> > > > #### Step 3: Install dependencies > @@ -238,4 +240,4 @@ ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "language > Move on to the step-by-step tutorial in which you build an iOS or macOS app that gets an access token from the Microsoft identity platform and uses it to call the Microsoft Graph API. > > > [!div class="nextstepaction"] -> > [Tutorial: Sign in users and call Microsoft Graph from an iOS or macOS app](tutorial-v2-ios.md) \ No newline at end of file +> > [Tutorial: Sign in users and call Microsoft Graph from an iOS or macOS app](tutorial-v2-ios.md) diff --git a/articles/active-directory/develop/msal-js-sso.md b/articles/active-directory/develop/msal-js-sso.md index 8ba97653b94c..36f68efcf2b9 100644 --- a/articles/active-directory/develop/msal-js-sso.md +++ b/articles/active-directory/develop/msal-js-sso.md @@ -19,20 +19,18 @@ ms.custom: aaddev, has-adal-ref # Single sign-on with MSAL.js -Single sign-on (SSO) provides a more seamless experience by reducing the number of times your users are asked for their credentials. Users enter their credentials once, and the established session can be reused by other applications on the device without further prompting. +Single sign-on (SSO) provides a more seamless experience by reducing the number of times your users are asked for their credentials. 
Users enter their credentials once, and the established session can be reused by other applications on the device without further prompting. -Azure Active Directory (Azure AD) enables SSO by setting a session cookie when a user first authenticates. MSAL.js allows use of the session cookie for SSO between the browser tabs opened for one or several applications. +Azure Active Directory (Azure AD) enables SSO by setting a session cookie when a user authenticates for the first time. MSAL.js allows the usage of the session cookie for SSO between the browser tabs opened for one or several applications. -## SSO between browser tabs +## SSO between browser tabs for the same app -When a user has your application open in several tabs and signs in on one of them, they're signed into the same app open on the other tabs without being prompted. MSAL.js caches the ID token for the user in the browser `localStorage` and will sign the user in to the application on the other open tabs. - -By default, MSAL.js uses `sessionStorage`, which doesn't allow the session to be shared between tabs. To get SSO between tabs, make sure to set the `cacheLocation` in MSAL.js to `localStorage` as shown below. +When a user has your application open in several tabs and signs in on one of them, they can be signed into the same app open on the other tabs without being prompted. To do so, you'll need to set the *cacheLocation* in MSAL.js configuration object to `localStorage` as shown below. ```javascript const config = { auth: { - clientId: "abcd-ef12-gh34-ikkl-ashdjhlhsdg", + clientId: "1111-2222-3333-4444-55555555", }, cache: { cacheLocation: "localStorage", @@ -42,61 +40,65 @@ const config = { const msalInstance = new msal.PublicClientApplication(config); ``` -## SSO between apps - -When a user authenticates, a session cookie is set on the Azure AD domain in the browser. MSAL.js relies on this session cookie to provide SSO for the user between different applications. 
MSAL.js also caches the ID tokens and access tokens of the user in the browser storage per application domain. As a result, the SSO behavior varies for different cases: - -### Applications on the same domain - -When applications are hosted on the same domain, the user can sign into an app once and then get authenticated to the other apps without a prompt. MSAL.js uses the tokens cached for the user on the domain to provide SSO. - -### Applications on different domain - -When applications are hosted on different domains, the tokens cached on domain A cannot be accessed by MSAL.js in domain B. - -When a user signed in on domain A navigates to an application on domain B, they're typically redirected or prompted to sign in. Because Azure AD still has the user's session cookie, it signs in the user without prompting for credentials. +## SSO between different apps -If the user has multiple user accounts in a session with Azure AD, the user is prompted to pick an account to sign in with. +When a user authenticates, a session cookie is set on the Azure AD domain in the browser. MSAL.js relies on this session cookie to provide SSO for the user between different applications. MSAL.js also caches the ID tokens and access tokens of the user in the browser storage per application domain. -### Automatic account selection +MSAL.js offers the `ssoSilent` method to sign-in the user and obtain tokens without an interaction. However, if the user has multiple user accounts in a session with Azure AD, then the user is prompted to pick an account to sign in with. As such, there are two ways to achieve SSO using `ssoSilent` method. -When a user is signed in concurrently to multiple Azure AD accounts on the same device, you might find you have the need to bypass the account selection prompt. +### With user hint -**Using a session ID** +To improve performance and ensure that the authorization server will look for the correct account session. 
You can pass one of the following options in the request object of the `ssoSilent` method to obtain the token silently. -Use the session ID (SID) in silent authentication requests you make with `acquireTokenSilent` in MSAL.js. +- Session ID `sid` (which can be retrieved from `idTokenClaims` of an `account` object) +- `login_hint` (which can be retrieved from the `account` object username property or the `upn` claim in the ID token) +- `account` (which can be retrieved by using one of the [account methods](https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-browser/docs/login-user.md#account-apis)) -To use a SID, add `sid` as an [optional claim](active-directory-optional-claims.md) to your app's ID tokens. The `sid` claim allows an application to identify a user's Azure AD session independent of their account name or username. To learn how to add optional claims like `sid`, see [Provide optional claims to your app](active-directory-optional-claims.md). +#### Using a session ID -The SID is bound to the session cookie and won't cross browser contexts. You can use the SID only with `acquireTokenSilent`. +To use a session ID, add `sid` as an [optional claim](active-directory-optional-claims.md) to your app's ID tokens. The `sid` claim allows an application to identify a user's Azure AD session independent of their account name or username. To learn how to add optional claims like `sid`, see [Provide optional claims to your app](active-directory-optional-claims.md). Use the session ID (SID) in silent authentication requests you make with `ssoSilent` in MSAL.js. 
```javascript -var request = { +const request = { scopes: ["user.read"], sid: sid, }; - msalInstance.acquireTokenSilent(request) - .then(function (response) { - const token = response.accessToken; - }) - .catch(function (error) { - //handle error - }); + try { + const loginResponse = await msalInstance.ssoSilent(request); +} catch (err) { + if (err instanceof InteractionRequiredAuthError) { + const loginResponse = await msalInstance.loginPopup(request).catch(error => { + // handle error + }); + } else { + // handle error + } +} ``` -**Using a login hint** +#### Using a login hint To bypass the account selection prompt typically shown during interactive authentication requests (or for silent requests when you haven't configured the `sid` optional claim), provide a `loginHint`. In multi-tenant applications, also include a `domain_hint`. ```javascript -var request = { +const request = { scopes: ["user.read"], loginHint: preferred_username, extraQueryParameters: { domain_hint: "organizations" }, }; - msalInstance.loginRedirect(request); +try { + const loginResponse = await msalInstance.ssoSilent(request); +} catch (err) { + if (err instanceof InteractionRequiredAuthError) { + const loginResponse = await msalInstance.loginPopup(request).catch(error => { + // handle error + }); + } else { + // handle error + } +} ``` Get the values for `loginHint` and `domain_hint` from the user's **ID token**: @@ -107,34 +109,83 @@ Get the values for `loginHint` and `domain_hint` from the user's **ID token**: For more information about login hint and domain hint, see [Microsoft identity platform and OAuth 2.0 authorization code flow](v2-oauth2-auth-code-flow.md). -## SSO without MSAL.js login +#### Using an account object -By design, MSAL.js requires that a login method is called to establish a user context before getting tokens for APIs. Since login methods are interactive, the user sees a prompt. 
+If you know the user account information, you can also retrieve the user account by using the `getAccountByUsername()` or `getAccountByHomeId()` methods: -There are certain cases in which applications have access to the authenticated user's context or ID token through authentication initiated in another application and want to use SSO to acquire tokens without first signing in through MSAL.js. +```javascript +const username = "test@contoso.com"; +const myAccount = msalInstance.getAccountByUsername(username); + +const request = { + scopes: ["User.Read"], + account: myAccount +}; -An example: A user is signed in to Microsoft account in a browser that hosts another JavaScript application running as an add-on or plugin, which requires a Microsoft account sign-in. +try { + const loginResponse = await msalInstance.ssoSilent(request); +} catch (err) { + if (err instanceof InteractionRequiredAuthError) { + const loginResponse = await msalInstance.loginPopup(request).catch(error => { + // handle error + }); + } else { + // handle error + } +} +``` -The SSO experience in this scenario can be achieved as follows: +### Without user hint -Pass the `sid` if available (or `login_hint` and optionally `domain_hint`) as request parameters to the MSAL.js `acquireTokenSilent` call as follows: +You can attempt to use the `ssoSilent` method without passing any `account`, `sid` or `login_hint` as shown in the code below: ```javascript -var request = { - scopes: ["user.read"], - loginHint: preferred_username, - extraQueryParameters: { domain_hint: "organizations" }, +const request = { + scopes: ["User.Read"] }; -msalInstance.acquireTokenSilent(request) - .then(function (response) { - const token = response.accessToken; - }) - .catch(function (error) { - //handle error - }); +try { + const loginResponse = await msalInstance.ssoSilent(request); +} catch (err) { + if (err instanceof InteractionRequiredAuthError) { + const loginResponse = await msalInstance.loginPopup(request).catch(error => 
{ + // handle error + }); + } else { + // handle error + } +} ``` +However, there's a likelihood of silent sign-in errors if the application has multiple users in a single browser session or if the user has multiple accounts for that single browser session. You may see the following error in the case of multiple accounts: + +```txt +InteractionRequiredAuthError: interaction_required: AADSTS16000: Either multiple user identities are available for the current request or selected account is not supported for the scenario. +``` + +The error indicates that the server couldn't determine which account to sign into, and will require either one of the parameters above (`account`, `login_hint`, `sid`) or an interactive sign-in to choose the account. + +## Considerations when using `ssoSilent` + +### Redirect URI (reply URL) + +For better performance and to help avoid issues, set the `redirectUri` to a blank page or other page that doesn't use MSAL. + +- If your application uses only popup and silent methods, set the `redirectUri` on the `PublicClientApplication` configuration object. +- If your application also uses redirect methods, set the `redirectUri` on a per-request basis. + +### Third-party cookies + +`ssoSilent` attempts to open a hidden iframe and reuse an existing session with Azure AD. This won't work in browsers that block third-party cookies such as Safari, and will lead to an interaction error: + +```txt +InteractionRequiredAuthError: login_required: AADSTS50058: A silent sign-in request was sent but no user is signed in. The cookies used to represent the user's session were not sent in the request to Azure AD +``` + +To resolve the error, the user must create an interactive authentication request using the `loginPopup()` or `loginRedirect()`. + +Additionally, the request object is required when using the **silent** methods. 
If you already have the user's sign-in information, you can pass either the `loginHint` or `sid` optional parameters to sign-in a specific account. + ## SSO in ADAL.js to MSAL.js update MSAL.js brings feature parity with ADAL.js for Azure AD authentication scenarios. To make the migration from ADAL.js to MSAL.js easy and to avoid prompting your users to sign in again, the library reads the ID token representing user’s session in ADAL.js cache, and seamlessly signs in the user in MSAL.js. @@ -145,7 +196,7 @@ To take advantage of the SSO behavior when updating from ADAL.js, you'll need to // In ADAL.js window.config = { - clientId: "g075edef-0efa-453b-997b-de1337c29185", + clientId: "1111-2222-3333-4444-55555555", cacheLocation: "localStorage", }; @@ -154,7 +205,7 @@ var authContext = new AuthenticationContext(config); // In latest MSAL.js version const config = { auth: { - clientId: "abcd-ef12-gh34-ikkl-ashdjhlhsdg", + clientId: "1111-2222-3333-4444-55555555", }, cache: { cacheLocation: "localStorage", @@ -170,5 +221,6 @@ Once the `cacheLocation` is configured, MSAL.js can read the cached state of the For more information about SSO, see: -- [Single Sign-On SAML protocol](single-sign-on-saml-protocol.md) +- [Single Sign-on SAML protocol](single-sign-on-saml-protocol.md) +- [Optional token claims](active-directory-optional-claims.md) - [Configurable token lifetimes](active-directory-configurable-token-lifetimes.md) diff --git a/articles/active-directory/develop/refresh-tokens.md b/articles/active-directory/develop/refresh-tokens.md index 22a7ca457ba9..7e967d8309f8 100644 --- a/articles/active-directory/develop/refresh-tokens.md +++ b/articles/active-directory/develop/refresh-tokens.md @@ -29,7 +29,10 @@ Before reading through this article, it's recommended that you go through the fo ## Refresh token lifetime -Refresh tokens have a longer lifetime than access tokens. 
The default lifetime for the tokens is 90 days and they replace themselves with a fresh token upon every use. As such, whenever a refresh token is used to acquire a new access token, a new refresh token is also issued. The Microsoft identity platform doesn't revoke old refresh tokens when used to fetch new access tokens. Securely delete the old refresh token after acquiring a new one. Refresh tokens need to be stored safely like access tokens or application credentials. +Refresh tokens have a longer lifetime than access tokens. The default lifetime for the refresh tokens is 24 hours for [single page apps](reference-third-party-cookies-spas.md) and 90 days for all other scenarios. Refresh tokens replace themselves with a fresh token upon every use. The Microsoft identity platform doesn't revoke old refresh tokens when used to fetch new access tokens. Securely delete the old refresh token after acquiring a new one. Refresh tokens need to be stored safely like access tokens or application credentials. + +>[!IMPORTANT] +> Refresh tokens sent to a redirect URI registered as `spa` expire after 24 hours. Additional refresh tokens acquired using the initial refresh token carry over that expiration time, so apps must be prepared to rerun the authorization code flow using an interactive authentication to get a new refresh token every 24 hours. Users do not have to enter their credentials and usually don't even see any related user experience, just a reload of your application. The browser must visit the log-in page in a top-level frame to show the login session. This is due to [privacy features in browsers that block third party cookies](reference-third-party-cookies-spas.md). 
## Refresh token expiration diff --git a/articles/active-directory/develop/scenario-spa-sign-in.md b/articles/active-directory/develop/scenario-spa-sign-in.md index 41406d638a48..39dfaec5f86d 100644 --- a/articles/active-directory/develop/scenario-spa-sign-in.md +++ b/articles/active-directory/develop/scenario-spa-sign-in.md @@ -28,7 +28,7 @@ Before you can get tokens to access APIs in your application, you need an authen You can also optionally pass the scopes of the APIs for which you need the user to consent at the time of sign-in. > [!NOTE] -> If your application already has access to an authenticated user context or ID token, you can skip the login step and directly acquire tokens. For details, see [SSO without MSAL.js login](msal-js-sso.md#sso-without-msaljs-login). +> If your application already has access to an authenticated user context or ID token, you can skip the login step and directly acquire tokens. For details, see [SSO with user hint](msal-js-sso.md#with-user-hint). ## Choosing between a pop-up or redirect experience diff --git a/articles/active-directory/develop/scenario-web-app-sign-user-app-configuration.md b/articles/active-directory/develop/scenario-web-app-sign-user-app-configuration.md index b9a59a676984..1cdfa3235316 100644 --- a/articles/active-directory/develop/scenario-web-app-sign-user-app-configuration.md +++ b/articles/active-directory/develop/scenario-web-app-sign-user-app-configuration.md @@ -180,31 +180,15 @@ In the Azure portal, the reply URIs that you register on the **Authentication** # [Node.js](#tab/nodejs) -Here, the configuration parameters reside in `index.js` +Here, the configuration parameters reside in *.env* as environment variables: -```javascript +:::code language="text" source="~/ms-identity-node/App/.env"::: -const REDIRECT_URI = "http://localhost:3000/redirect"; +These parameters are used to create a configuration object in *authConfig.js* file, which will eventually be used to initialize MSAL Node: -const config = 
{ - auth: { - clientId: "Enter_the_Application_Id_Here", - authority: "https://login.microsoftonline.com/Enter_the_Tenant_Info_Here/", - clientSecret: "Enter_the_Client_Secret_Here" - }, - system: { - loggerOptions: { - loggerCallback(loglevel, message, containsPii) { - console.log(message); - }, - piiLoggingEnabled: false, - logLevel: msal.LogLevel.Verbose, - } - } -}; -``` +:::code language="js" source="~/ms-identity-node/App/authConfig.js"::: -In the Azure portal, the reply URIs that you register on the Authentication page for your application need to match the redirectUri instances that the application defines (`http://localhost:3000/redirect`). +In the Azure portal, the reply URIs that you register on the Authentication page for your application need to match the redirectUri instances that the application defines (`http://localhost:3000/auth/redirect`). > [!NOTE] > This quickstart proposes to store the client secret in the configuration file for simplicity. In your production app, you'd want to use other ways to store your secret, such as a key vault or an environment variable. @@ -350,12 +334,9 @@ For details about the authorization code flow that this method triggers, see the # [Node.js](#tab/nodejs) -```javascript -const msal = require('@azure/msal-node'); +The Node sample uses the Express framework. 
MSAL is initialized in *auth* route handler: -// Create msal application object -const cca = new msal.ConfidentialClientApplication(config); -``` +:::code language="js" source="~/ms-identity-node/App/routes/auth.js" range="6-16"::: # [Python](#tab/python) diff --git a/articles/active-directory/develop/scenario-web-app-sign-user-app-registration.md b/articles/active-directory/develop/scenario-web-app-sign-user-app-registration.md index 2e7f4999fc08..55ed8684248f 100644 --- a/articles/active-directory/develop/scenario-web-app-sign-user-app-registration.md +++ b/articles/active-directory/develop/scenario-web-app-sign-user-app-registration.md @@ -96,8 +96,8 @@ By default, the sample uses: 1. When the **Register an application page** appears, enter your application's registration information: 1. Enter a **Name** for your application, for example `node-webapp`. Users of your app might see this name, and you can change it later. - 1. Change **Supported account types** to **Accounts in any organizational directory and personal Microsoft accounts (e.g. Skype, Xbox, Outlook.com)**. - 1. In the **Redirect URI (optional)** section, select **Web** in the combo box and enter the following redirect URI: `http://localhost:3000/redirect`. + 1. Change **Supported account types** to **Accounts in this organizational directory only**. + 1. In the **Redirect URI (optional)** section, select **Web** in the combo box and enter the following redirect URI: `http://localhost:3000/auth/redirect`. 1. Select **Register** to create the application. 1. On the app's **Overview** page, find the **Application (client) ID** value and record it for later. You'll need it to configure the configuration file for this project. 1. Under **Manage**, select **Certificates & secrets**. 
diff --git a/articles/active-directory/develop/scenario-web-app-sign-user-sign-in.md b/articles/active-directory/develop/scenario-web-app-sign-user-sign-in.md index 9e70e56361a4..00ab03465cd4 100644 --- a/articles/active-directory/develop/scenario-web-app-sign-user-sign-in.md +++ b/articles/active-directory/develop/scenario-web-app-sign-user-sign-in.md @@ -72,7 +72,7 @@ else # [Java](#tab/java) -In our Java quickstart, the sign-in button is located in the [main/resources/templates/index.html](https://github.com/Azure-Samples/ms-identity-java-webapp/blob/master/msal-java-webapp-sample/src/main/resources/templates/index.html) file. +In the Java quickstart, the sign-in button is located in the [main/resources/templates/index.html](https://github.com/Azure-Samples/ms-identity-java-webapp/blob/master/msal-java-webapp-sample/src/main/resources/templates/index.html) file. ```html @@ -94,13 +94,13 @@ In our Java quickstart, the sign-in button is located in the [main/resources/tem # [Node.js](#tab/nodejs) -In the Node.js quickstart, there's no sign-in button. The code-behind automatically prompts the user for sign-in when it's reaching the root of the web app. +In the Node.js quickstart, the code for the sign-in button is located in *index.hbs* template file. -```javascript -app.get('/', (req, res) => { - // authentication logic -}); -``` +:::code language="hbs" source="~/ms-identity-node/App/views/index.hbs" range="10-11"::: + +This template is served via the main (index) route of the app: + +:::code language="js" source="~/ms-identity-node/App/routes/index.js" range="6-15"::: # [Python](#tab/python) @@ -169,40 +169,9 @@ public class AuthPageController { # [Node.js](#tab/nodejs) -Unlike other platforms, here the MSAL Node takes care of letting the user sign in from the login page. 
- -```javascript - -// 1st leg of auth code flow: acquire a code -app.get('/', (req, res) => { - const authCodeUrlParameters = { - scopes: ["user.read"], - redirectUri: REDIRECT_URI, - }; - - // get url to sign user in and consent to scopes needed for application - pca.getAuthCodeUrl(authCodeUrlParameters).then((response) => { - res.redirect(response); - }).catch((error) => console.log(JSON.stringify(error))); -}); - -// 2nd leg of auth code flow: exchange code for token -app.get('/redirect', (req, res) => { - const tokenRequest = { - code: req.query.code, - scopes: ["user.read"], - redirectUri: REDIRECT_URI, - }; - - pca.acquireTokenByCode(tokenRequest).then((response) => { - console.log("\nResponse: \n:", response); - res.sendStatus(200); - }).catch((error) => { - console.log(error); - res.status(500).send(error); - }); -}); -``` +When the user selects the **Sign in** link, which triggers the `/auth/signin` route, the sign-in controller takes over to authenticate the user with Microsoft identity platform. + +:::code language="js" source="~/ms-identity-node/App/routes/auth.js" range="27-107, 135-161"::: # [Python](#tab/python) @@ -355,7 +324,7 @@ In our Java quickstart, the sign-out button is located in the main/resources/tem # [Node.js](#tab/nodejs) -This sample application does not implement sign-out. +:::code language="hbs" source="~/ms-identity-node/App/views/index.hbs" range="2, 8"::: # [Python](#tab/python) @@ -431,7 +400,9 @@ In Java, sign-out is handled by calling the Microsoft identity platform `logout` # [Node.js](#tab/nodejs) -This sample application does not implement sign-out. +When the user selects the **Sign out** button, the app triggers the `/signout` route, which destroys the session and redirects the browser to Microsoft identity platform sign-out endpoint. 
+ +:::code language="js" source="~/ms-identity-node/App/routes/auth.js" range="163-174"::: # [Python](#tab/python) @@ -479,7 +450,7 @@ In the Java quickstart, the post-logout redirect URI just displays the index.htm # [Node.js](#tab/nodejs) -This sample application does not implement sign-out. +In the Node quickstart, the post-logout redirect URI is used to redirect the browser back to sample home page after the user completes the logout process with the Microsoft identity platform. # [Python](#tab/python) @@ -494,4 +465,4 @@ If you want to learn more about sign-out, read the protocol documentation that's ## Next steps Move on to the next article in this scenario, -[Move to production](scenario-web-app-sign-user-production.md). \ No newline at end of file +[Move to production](scenario-web-app-sign-user-production.md). diff --git a/articles/active-directory/develop/tutorial-v2-ios.md b/articles/active-directory/develop/tutorial-v2-ios.md index 657e5b2399bd..e8f6b1bf542d 100644 --- a/articles/active-directory/develop/tutorial-v2-ios.md +++ b/articles/active-directory/develop/tutorial-v2-ios.md @@ -1,16 +1,13 @@ --- -title: "Tutorial: Create an iOS or macOS app that uses the Microsoft identity platform for authentication | Azure" -titleSuffix: Microsoft identity platform -description: In this tutorial, you build an iOS or macOS app that uses the Microsoft identity platform to sign in users and get an access token to call the Microsoft Graph API on their behalf. -services: active-directory +title: "Tutorial: Create an iOS or macOS app that uses the Microsoft identity platform for authentication" +description: Build an iOS or macOS app that uses the Microsoft identity platform to sign in users and get an access token to call the Microsoft Graph API on their behalf. 
author: mmacy manager: CelesteDG ms.service: active-directory ms.subservice: develop ms.topic: tutorial -ms.workload: identity -ms.date: 09/18/2020 +ms.date: 05/28/2022 ms.author: marsma ms.reviewer: oldalton ms.custom: aaddev, identityplatformtop40, has-adal-ref @@ -20,7 +17,7 @@ ms.custom: aaddev, identityplatformtop40, has-adal-ref In this tutorial, you build an iOS or macOS app that integrates with the Microsoft identity platform to sign users and get an access token to call the Microsoft Graph API. -When you've completed the guide, your application will accept sign-ins of personal Microsoft accounts (including outlook.com, live.com, and others) and work or school accounts from any company or organization that uses Azure Active Directory. This tutorial is applicable to both iOS and macOS apps. Some steps are different between the two platforms. +When you've completed the tutorial, your application will accept sign-ins of personal Microsoft accounts (including outlook.com, live.com, and others) and work or school accounts from any company or organization that uses Azure Active Directory. This tutorial is applicable to both iOS and macOS apps. Some steps are different between the two platforms. In this tutorial: @@ -75,8 +72,8 @@ If you'd like to download a completed version of the app you build in this tutor 1. Select **Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)** under **Supported account types**. 1. Select **Register**. 1. Under **Manage**, select **Authentication** > **Add a platform** > **iOS/macOS**. -1. Enter your project's Bundle ID. If you downloaded the code, this is `com.microsoft.identitysample.MSALiOS`. If you're creating your own project, select your project in Xcode and open the **General** tab. The bundle identifier appears in the **Identity** section. -1. 
Select **Configure** and save the **MSAL Configuration** that appears in the **MSAL configuration** page so you can enter it when you configure your app later. +1. Enter your project's Bundle ID. If you downloaded the code sample, the Bundle ID is `com.microsoft.identitysample.MSALiOS`. If you're creating your own project, select your project in Xcode and open the **General** tab. The bundle identifier appears in the **Identity** section. +1. Select **Configure** and save the **MSAL Configuration** that appears in the **MSAL configuration** page so you can enter it when you configure your app later. 1. Select **Done**. ## Add MSAL @@ -85,7 +82,7 @@ Choose one of the following ways to install the MSAL library in your app: ### CocoaPods -1. If you're using [CocoaPods](https://cocoapods.org/), install `MSAL` by first creating an empty file called `podfile` in the same folder as your project's `.xcodeproj` file. Add the following to `podfile`: +1. If you're using [CocoaPods](https://cocoapods.org/), install `MSAL` by first creating an empty file called _podfile_ in the same folder as your project's _.xcodeproj_ file. Add the following to _podfile_: ``` use_frameworks! ``` 2. Replace `` with the name of your project. -3. In a terminal window, navigate to the folder that contains the `podfile` you created and run `pod install` to install the MSAL library. +3. In a terminal window, navigate to the folder that contains the _podfile_ you created and run `pod install` to install the MSAL library. 4. Close Xcode and open `.xcworkspace` to reload the project in Xcode. 
### Carthage -If you're using [Carthage](https://github.com/Carthage/Carthage), install `MSAL` by adding it to your `Cartfile`: +If you're using [Carthage](https://github.com/Carthage/Carthage), install `MSAL` by adding it to your _Cartfile_: ``` github "AzureAD/microsoft-authentication-library-for-objc" "master" ``` -From a terminal window, in the same directory as the updated `Cartfile`, run the following command to have Carthage update the dependencies in your project. +From a terminal window, in the same directory as the updated _Cartfile_, run the following command to have Carthage update the dependencies in your project. iOS: @@ -129,13 +126,13 @@ You can also use Git Submodule, or check out the latest release to use as a fram Next, we'll add your app registration to your code. -First, add the following import statement to the top of the `ViewController.swift`, as well as `AppDelegate.swift` or `SceneDelegate.swift` files: +First, add the following import statement to the top of the _ViewController.swift_ file and either _AppDelegate.swift_ or _SceneDelegate.swift_: ```swift import MSAL ``` -Then Add the following code to `ViewController.swift` prior to `viewDidLoad()`: +Next, add the following code to _ViewController.swift_ before `viewDidLoad()`: ```swift // Update the below to your client ID you received in the portal. The below is for running the demo only @@ -151,7 +148,7 @@ var webViewParameters : MSALWebviewParameters? var currentAccount: MSALAccount? ``` -The only value you modify above is the value assigned to `kClientID`to be your [Application ID](./developer-glossary.md#application-id-client-id). +The only value you modify above is the value assigned to `kClientID` to be your [Application ID](./developer-glossary.md#application-client-id). 
This value is part of the MSAL Configuration data that you saved during the step at the beginning of this tutorial to register the application in the Azure portal. ## Configure Xcode project settings @@ -161,9 +158,9 @@ Add a new keychain group to your project **Signing & Capabilities**. The keychai ## For iOS only, configure URL schemes -In this step, you will register `CFBundleURLSchemes` so that the user can be redirected back to the app after sign in. By the way, `LSApplicationQueriesSchemes` also allows your app to make use of Microsoft Authenticator. +In this step, you'll register `CFBundleURLSchemes` so that the user can be redirected back to the app after sign in. By the way, `LSApplicationQueriesSchemes` also allows your app to make use of Microsoft Authenticator. -In Xcode, open `Info.plist` as a source code file, and add the following inside of the `` section. Replace `[BUNDLE_ID]` with the value you used in the Azure portal. If you downloaded the code, the bundle identifier is `com.microsoft.identitysample.MSALiOS`. If you're creating your own project, select your project in Xcode and open the **General** tab. The bundle identifier appears in the **Identity** section. +In Xcode, open _Info.plist_ as a source code file, and add the following inside of the `` section. Replace `[BUNDLE_ID]` with the value you used in the Azure portal. If you downloaded the code, the bundle identifier is `com.microsoft.identitysample.MSALiOS`. If you're creating your own project, select your project in Xcode and open the **General** tab. The bundle identifier appears in the **Identity** section. 
```xml CFBundleURLTypes @@ -189,7 +186,7 @@ In Xcode, open `Info.plist` as a source code file, and add the following inside ## Create your app's UI -Now create a UI that includes a button to call the Microsoft Graph API, another to sign out, and a text view to see some output by adding the following code to the `ViewController`class: +Now create a UI that includes a button to call the Microsoft Graph API, another to sign out, and a text view to see some output by adding the following code to the `ViewController` class: ### iOS UI @@ -372,7 +369,7 @@ Next, also inside the `ViewController` class, replace the `viewDidLoad()` method ### Initialize MSAL -Add the following `initMSAL` method to the `ViewController` class: +To the `ViewController` class, add the `initMSAL` method: ```swift func initMSAL() throws { @@ -390,7 +387,7 @@ Add the following `initMSAL` method to the `ViewController` class: } ``` -Add the following after `initMSAL` method to the `ViewController` class. +Still in the `ViewController` class and after the `initMSAL` method, add the `initWebViewParams` method: ### iOS code: @@ -408,9 +405,9 @@ func initWebViewParams() { } ``` -### For iOS only, handle the sign-in callback +### Handle the sign-in callback (iOS only) -Open the `AppDelegate.swift` file. To handle the callback after sign-in, add `MSALPublicClientApplication.handleMSALResponse` to the `appDelegate` class like this: +Open the _AppDelegate.swift_ file. To handle the callback after sign-in, add `MSALPublicClientApplication.handleMSALResponse` to the `appDelegate` class like this: ```swift // Inside AppDelegate... @@ -421,7 +418,7 @@ func application(_ app: UIApplication, open url: URL, options: [UIApplication.Op ``` -**If you are using Xcode 11**, you should place MSAL callback into the `SceneDelegate.swift` instead. +**If you are using Xcode 11**, you should place MSAL callback into the _SceneDelegate.swift_ instead. 
If you support both UISceneDelegate and UIApplicationDelegate for compatibility with older iOS, MSAL callback would need to be placed into both files. ```swift @@ -442,9 +439,9 @@ func scene(_ scene: UIScene, openURLContexts URLContexts: Set) Now, we can implement the application's UI processing logic and get tokens interactively through MSAL. -MSAL exposes two primary methods for getting tokens: `acquireTokenSilently()` and `acquireTokenInteractively()`: +MSAL exposes two primary methods for getting tokens: `acquireTokenSilently()` and `acquireTokenInteractively()`. -- `acquireTokenSilently()` attempts to sign in a user and get tokens without any user interaction as long as an account is present. `acquireTokenSilently()` requires providing a valid `MSALAccount` which can be retrieved by using one of MSAL account enumeration APIs. This sample uses `applicationContext.getCurrentAccount(with: msalParameters, completionBlock: {})` to retrieve current account. +- `acquireTokenSilently()` attempts to sign in a user and get tokens without user interaction as long as an account is present. `acquireTokenSilently()` requires a valid `MSALAccount` which can be retrieved by using one of MSAL's account enumeration APIs. This tutorial uses `applicationContext.getCurrentAccount(with: msalParameters, completionBlock: {})` to retrieve the current account. - `acquireTokenInteractively()` always shows UI when attempting to sign in the user. It may use session cookies in the browser or an account in the Microsoft authenticator to provide an interactive-SSO experience. @@ -513,7 +510,7 @@ Add the following code to the `ViewController` class: #### Get a token interactively -The following code snippet gets a token for the first time by creating an `MSALInteractiveTokenParameters` object and calling `acquireToken`. Next you will add code that: +The following code snippet gets a token for the first time by creating an `MSALInteractiveTokenParameters` object and calling `acquireToken`. 
Next you'll add code that: 1. Creates `MSALInteractiveTokenParameters` with scopes. 2. Calls `acquireToken()` with the created parameters. @@ -812,7 +809,7 @@ Add the following helper methods to the `ViewController` class to complete the s } ``` -### For iOS only, get additional device information +### iOS only: get additional device information Use following code to read current device configuration, including whether device is configured as shared: @@ -839,13 +836,13 @@ Use following code to read current device configuration, including whether devic ### Multi-account applications -This app is built for a single account scenario. MSAL also supports multi-account scenarios, but it requires some additional work from apps. You will need to create UI to help users select which account they want to use for each action that requires tokens. Alternatively, your app can implement a heuristic to select which account to use by querying all accounts from MSAL. For example, see `accountsFromDeviceForParameters:completionBlock:` [API](https://azuread.github.io/microsoft-authentication-library-for-objc/Classes/MSALPublicClientApplication.html#/c:objc(cs)MSALPublicClientApplication(im)accountsFromDeviceForParameters:completionBlock:) +This app is built for a single account scenario. MSAL also supports multi-account scenarios, but it requires more application work. You'll need to create UI to help users select which account they want to use for each action that requires tokens. Alternatively, your app can implement a heuristic to select which account to use by querying all accounts from MSAL. For example, see `accountsFromDeviceForParameters:completionBlock:` [API](https://azuread.github.io/microsoft-authentication-library-for-objc/Classes/MSALPublicClientApplication.html#/c:objc(cs)MSALPublicClientApplication(im)accountsFromDeviceForParameters:completionBlock:) ## Test your app Build and deploy the app to a test device or simulator. 
You should be able to sign in and get tokens for Azure AD or personal Microsoft accounts. -The first time a user signs into your app, they will be prompted by Microsoft identity to consent to the permissions requested. While most users are capable of consenting, some Azure AD tenants have disabled user consent, which requires admins to consent on behalf of all users. To support this scenario, register your app's scopes in the Azure portal. +The first time a user signs into your app, they'll be prompted by Microsoft identity to consent to the permissions requested. While most users are capable of consenting, some Azure AD tenants have disabled user consent, which requires admins to consent on behalf of all users. To support this scenario, register your app's scopes in the Azure portal. After you sign in, the app will display the data returned from the Microsoft Graph `/me` endpoint. diff --git a/articles/active-directory/develop/tutorial-v2-nodejs-webapp-msal.md b/articles/active-directory/develop/tutorial-v2-nodejs-webapp-msal.md index 757a04d17846..fe934b7e9cd0 100644 --- a/articles/active-directory/develop/tutorial-v2-nodejs-webapp-msal.md +++ b/articles/active-directory/develop/tutorial-v2-nodejs-webapp-msal.md @@ -13,9 +13,9 @@ ms.date: 02/17/2021 ms.author: marsma --- -# Tutorial: Sign in users in a Node.js & Express web app +# Tutorial: Sign in users and acquire a token for Microsoft Graph in a Node.js & Express web app -In this tutorial, you build a web app that signs-in users. The web app you build uses the [Microsoft Authentication Library (MSAL) for Node](https://github.com/AzureAD/microsoft-authentication-library-for-js/tree/dev/lib/msal-node). +In this tutorial, you build a web app that signs-in users and acquires access tokens for calling Microsoft Graph. The web app you build uses the [Microsoft Authentication Library (MSAL) for Node](https://github.com/AzureAD/microsoft-authentication-library-for-js/tree/dev/lib/msal-node). 
Follow the steps in this tutorial to: @@ -39,151 +39,175 @@ First, complete the steps in [Register an application with the Microsoft identit Use the following settings for your app registration: - Name: `ExpressWebApp` (suggested) -- Supported account types: **Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)** +- Supported account types: **Accounts in this organizational directory only** - Platform type: **Web** -- Redirect URI: `http://localhost:3000/redirect` +- Redirect URI: `http://localhost:3000/auth/redirect` - Client secret: `*********` (record this value for use in a later step - it's shown only once) ## Create the project -Create a folder to host your application, for example *ExpressWebApp*. +Use the [Express application generator tool](https://expressjs.com/en/starter/generator.html) to create an application skeleton. -1. First, change to your project directory in your terminal and then run the following `npm` commands: +1. First, install the [express-generator](https://www.npmjs.com/package/express-generator) package: ```console - npm init -y - npm install --save express + npm install -g express-generator ``` -2. Next, create file named *index.js* and add the following code: - -```JavaScript - const express = require("express"); - const msal = require('@azure/msal-node'); - - const SERVER_PORT = process.env.PORT || 3000; - - // Create Express App and Routes - const app = express(); - - app.listen(SERVER_PORT, () => console.log(`Msal Node Auth Code Sample app listening on port ${SERVER_PORT}!`)) +2. Then, create an application skeleton as follows: + +```console + express --view=hbs /ExpressWebApp && cd /ExpressWebApp + npm install ``` -You now have a simple web server running on port 3000. The file and folder structure of your project should look similar to the following: +You now have a simple Express web app. 
The file and folder structure of your project should look similar to the following: ``` ExpressWebApp/ -├── index.js +├── bin/ +| └── wwww +├── public/ +| ├── images/ +| ├── javascript/ +| └── stylesheets/ +| └── style.css +├── routes/ +| ├── index.js +| └── users.js +├── views/ +| ├── error.hbs +| ├── index.hbs +| └── layout.hbs +├── app.js └── package.json ``` ## Install the auth library -Locate the root of your project directory in a terminal and install the MSAL Node package via NPM. +Locate the root of your project directory in a terminal and install the MSAL Node package via npm. ```console npm install --save @azure/msal-node ``` -## Add app registration details +## Install other dependencies + +The web app sample in this tutorial uses the [express-session](https://www.npmjs.com/package/express-session) package for session management, [dotenv](https://www.npmjs.com/package/dotenv) package for reading environment parameters during development, and [axios](https://www.npmjs.com/package/axios) for making network calls to the Microsoft Graph API. Install these via npm: -In the *index.js* file you've created earlier, add the following code: - -```JavaScript - // Before running the sample, you will need to replace the values in the config, - // including the clientSecret - const config = { - auth: { - clientId: "Enter_the_Application_Id", - authority: "Enter_the_Cloud_Instance_Id_Here/Enter_the_Tenant_Id_here", - clientSecret: "Enter_the_Client_secret" - }, -     system: { -         loggerOptions: { -             loggerCallback(loglevel, message, containsPii) { -                 console.log(message); -             }, -          piiLoggingEnabled: false, -          logLevel: msal.LogLevel.Verbose, -         } -     } - }; +```console + npm install --save express-session dotenv axios ``` +## Add app registration details + +1. Create an *.env* file in the root of your project folder. 
Then add the following code: + +:::code language="text" source="~/ms-identity-node/App/.env"::: + Fill in these details with the values you obtain from Azure app registration portal: -- `Enter_the_Tenant_Id_here` should be one of the following: +- `Enter_the_Cloud_Instance_Id_Here`: The Azure cloud instance in which your application is registered. + - For the main (or *global*) Azure cloud, enter `https://login.microsoftonline.com/` (include the trailing forward-slash). + - For **national** clouds (for example, China), you can find appropriate values in [National clouds](authentication-national-cloud.md). +- `Enter_the_Tenant_Info_here` should be one of the following: - If your application supports *accounts in this organizational directory*, replace this value with the **Tenant ID** or **Tenant name**. For example, `contoso.microsoft.com`. - If your application supports *accounts in any organizational directory*, replace this value with `organizations`. - If your application supports *accounts in any organizational directory and personal Microsoft accounts*, replace this value with `common`. - To restrict support to *personal Microsoft accounts only*, replace this value with `consumers`. - `Enter_the_Application_Id_Here`: The **Application (client) ID** of the application you registered. -- `Enter_the_Cloud_Instance_Id_Here`: The Azure cloud instance in which your application is registered. - - For the main (or *global*) Azure cloud, enter `https://login.microsoftonline.com`. - - For **national** clouds (for example, China), you can find appropriate values in [National clouds](authentication-national-cloud.md). - `Enter_the_Client_secret`: Replace this value with the client secret you created earlier. To generate a new key, use **Certificates & secrets** in the app registration settings in the Azure portal. > [!WARNING] > Any plaintext secret in source code poses an increased security risk. This article uses a plaintext client secret for simplicity only. 
Use [certificate credentials](active-directory-certificate-credentials.md) instead of client secrets in your confidential client applications, especially those apps you intend to deploy to production. -## Add code for user login - -In the *index.js* file you've created earlier, add the following code: - -```JavaScript - // Create msal application object - const cca = new msal.ConfidentialClientApplication(config); - - app.get('/', (req, res) => { - const authCodeUrlParameters = { - scopes: ["user.read"], - redirectUri: "http://localhost:3000/redirect", - }; - - // get url to sign user in and consent to scopes needed for application - cca.getAuthCodeUrl(authCodeUrlParameters).then((response) => { - res.redirect(response); - }).catch((error) => console.log(JSON.stringify(error))); - }); - - app.get('/redirect', (req, res) => { - const tokenRequest = { - code: req.query.code, - scopes: ["user.read"], - redirectUri: "http://localhost:3000/redirect", - }; - - cca.acquireTokenByCode(tokenRequest).then((response) => { - console.log("\nResponse: \n:", response); - res.sendStatus(200); - }).catch((error) => { - console.log(error); - res.status(500).send(error); - }); - }); -``` +- `Enter_the_Graph_Endpoint_Here`: The Microsoft Graph API cloud instance that your app will call. For the main (global) Microsoft Graph API service, enter `https://graph.microsoft.com/` (include the trailing forward-slash). +- `Enter_the_Express_Session_Secret_Here` the secret used to sign the Express session cookie. Choose a random string of characters to replace this string with, such as your client secret. + + +2. Next, create a file named *authConfig.js* in the root of your project for reading in these parameters. Once created, add the following code there: + +:::code language="js" source="~/ms-identity-node/App/authConfig.js"::: + +## Add code for user login and token acquisition + +:::code language="js" source="~/ms-identity-node/App/routes/auth.js"::: + +2. 
Next, update the *index.js* route by replacing the existing code with the following: + +:::code language="js" source="~/ms-identity-node/App/routes/index.js"::: + +3. Finally, update the *users.js* route by replacing the existing code with the following: -## Test sign in +:::code language="js" source="~/ms-identity-node/App/routes/users.js"::: + +## Add code for calling the Microsoft Graph API + +Create a file named **fetch.js** in the root of your project and add the following code: + +:::code language="js" source="~/ms-identity-node/App/fetch.js"::: + +## Add views for displaying data + +1. In the *views* folder, update the *index.hbs* file by replacing the existing code with the following: + +:::code language="hbs" source="~/ms-identity-node/App/views/index.hbs"::: + +2. Still in the same folder, create another file named *id.hbs* for displaying the contents of user's ID token: + +:::code language="hbs" source="~/ms-identity-node/App/views/id.hbs"::: + +3. Finally, create another file named *profile.hbs* for displaying the result of the call made to Microsoft Graph: + +:::code language="hbs" source="~/ms-identity-node/App/views/profile.hbs"::: + +## Register routers and add state management + +In the *app.js* file in the root of the project folder, register the routes you have created earlier and add session support for tracking authentication state using the **express-session** package. Replace the existing code there with the following: + +:::code language="js" source="~/ms-identity-node/App/app.js"::: + +## Test sign in and call Microsoft Graph You've completed creation of the application and are now ready to test the app's functionality. 1. Start the Node.js console app by running the following command from within the root of your project folder: ```console - node index.js + npm start ``` -2. Open a browser window and navigate to `http://localhost:3000`. You should see a sign-in screen: +2. Open a browser window and navigate to `http://localhost:3000`. 
You should see a welcome page: + +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/welcome-screen.png" alt-text="Web app welcome page displaying"::: + +3. Select **Sign in** link. You should see the Azure AD sign-in screen: :::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/sign-in-screen.png" alt-text="Azure AD sign-in screen displaying"::: -3. Once you enter your credentials, you should see a consent screen asking you to approve the permissions for the app. +4. Once you enter your credentials, you should see a consent screen asking you to approve the permissions for the app. :::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/consent-screen.png" alt-text="Azure AD consent screen displaying"::: +5. Once you consent, you should be redirected back to application home page. + +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/post-sign-in-screen.png" alt-text="Web app welcome page after sign-in displaying"::: + +6. Select the **View ID Token** link for displaying the contents of the signed-in user's ID token. + +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/id-token-screen.png" alt-text="User ID token screen displaying"::: + +7. Go back to the home page, and select the **Acquire an access token and call the Microsoft Graph API** link. Once you do, you should see the response from Microsoft Graph /me endpoint for the signed-in user. + +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/graph-call-screen.png" alt-text="Graph call screen displaying"::: + +8. Go back to the home page, and select the **Sign out** link. You should see the Azure AD sign-out screen. 
+ +:::image type="content" source="media/tutorial-v2-nodejs-webapp-msal/sign-out-screen.png" alt-text="Azure AD sign-out screen displaying"::: + ## How the application works -In this tutorial, you initialized an MSAL Node [ConfidentialClientApplication](https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-node/docs/initialize-confidential-client-application.md) object by passing it a configuration object (*msalConfig*) that contains parameters obtained from your Azure AD app registration on Azure portal. The web app you created uses the [OAuth 2.0 Authorization code grant flow](./v2-oauth2-auth-code-flow.md) to sign-in users and obtain ID and access tokens. +In this tutorial, you instantiated an MSAL Node [ConfidentialClientApplication](https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-node/docs/initialize-confidential-client-application.md) object by passing it a configuration object (*msalConfig*) that contains parameters obtained from your Azure AD app registration on Azure portal. The web app you created uses the [OpenID Connect protocol](./v2-protocols-oidc.md) to sign-in users and the [OAuth 2.0 Authorization code grant flow](./v2-oauth2-auth-code-flow.md) to obtain access tokens. ## Next steps diff --git a/articles/active-directory/devices/howto-vm-sign-in-azure-ad-windows.md b/articles/active-directory/devices/howto-vm-sign-in-azure-ad-windows.md index 3f5e7b45bb7e..9ce866fe95b4 100644 --- a/articles/active-directory/devices/howto-vm-sign-in-azure-ad-windows.md +++ b/articles/active-directory/devices/howto-vm-sign-in-azure-ad-windows.md @@ -274,14 +274,14 @@ The AADLoginForWindows extension must install successfully in order for the VM t 1. 
Ensure the required endpoints are accessible from the VM using PowerShell: - - `curl https://login.microsoftonline.com/ -D -` - - `curl https://login.microsoftonline.com// -D -` - - `curl https://enterpriseregistration.windows.net/ -D -` - - `curl https://device.login.microsoftonline.com/ -D -` - - `curl https://pas.windows.net/ -D -` + - `curl.exe https://login.microsoftonline.com/ -D -` + - `curl.exe https://login.microsoftonline.com// -D -` + - `curl.exe https://enterpriseregistration.windows.net/ -D -` + - `curl.exe https://device.login.microsoftonline.com/ -D -` + - `curl.exe https://pas.windows.net/ -D -` > [!NOTE] - > Replace `` with the Azure AD Tenant ID that is associated with the Azure subscription.
`enterpriseregistration.windows.net` and `pas.windows.net` should return 404 Not Found, which is expected behavior. + > Replace `` with the Azure AD Tenant ID that is associated with the Azure subscription.
`login.microsoftonline.com/`, `enterpriseregistration.windows.net`, and `pas.windows.net` should return 404 Not Found, which is expected behavior. 1. The Device State can be viewed by running `dsregcmd /status`. The goal is for Device State to show as `AzureAdJoined : YES`. diff --git a/articles/active-directory/devices/hybrid-azuread-join-control.md b/articles/active-directory/devices/hybrid-azuread-join-control.md index ff95afc45afa..4aa7bd911740 100644 --- a/articles/active-directory/devices/hybrid-azuread-join-control.md +++ b/articles/active-directory/devices/hybrid-azuread-join-control.md @@ -72,10 +72,10 @@ Use the following example to create a Group Policy Object (GPO) to deploy a regi ### Configure AD FS settings -If you're using AD FS, you first need to configure client-side SCP using the instructions mentioned earlier by linking the GPO to your AD FS servers. The SCP object defines the source of authority for device objects. It can be on-premises or Azure AD. When client-side SCP is configured for AD FS, the source for device objects is established as Azure AD. +If your Azure AD is federated with AD FS, you first need to configure client-side SCP using the instructions mentioned earlier by linking the GPO to your AD FS servers. The SCP object defines the source of authority for device objects. It can be on-premises or Azure AD. When client-side SCP is configured for AD FS, the source for device objects is established as Azure AD. > [!NOTE] -> If you failed to configure client-side SCP on your AD FS servers, the source for device identities would be considered as on-premises. ADFS will then start deleting device objects from on-premises directory after the stipulated period defined in the ADFS Device Registration's attribute "MaximumInactiveDays". ADFS Device Registration objects can be found using the [Get-AdfsDeviceRegistration cmdlet](/powershell/module/adfs/get-adfsdeviceregistration). 
+> If you failed to configure client-side SCP on your AD FS servers, the source for device identities would be considered as on-premises. AD FS will then start deleting device objects from on-premises directory after the stipulated period defined in the AD FS Device Registration's attribute "MaximumInactiveDays". AD FS Device Registration objects can be found using the [Get-AdfsDeviceRegistration cmdlet](/powershell/module/adfs/get-adfsdeviceregistration). ## Supporting down-level devices diff --git a/articles/active-directory/enterprise-users/licensing-groups-assign.md b/articles/active-directory/enterprise-users/licensing-groups-assign.md index 856aeb69c227..ca79e54bbd04 100644 --- a/articles/active-directory/enterprise-users/licensing-groups-assign.md +++ b/articles/active-directory/enterprise-users/licensing-groups-assign.md @@ -11,7 +11,7 @@ ms.service: active-directory ms.subservice: enterprise-users ms.topic: how-to ms.workload: identity -ms.date: 12/02/2020 +ms.date: 05/26/2022 ms.author: curtand ms.reviewer: sumitp ms.custom: it-pro @@ -27,7 +27,7 @@ In this example, the Azure AD organization contains a security group called **HR > [!NOTE] > Some Microsoft services are not available in all locations. Before a license can be assigned to a user, the administrator has to specify the Usage location property on the user. > -> For group license assignment, any users without a usage location specified inherit the location of the directory. If you have users in multiple locations, we recommend that you always set usage location as part of your user creation flow in Azure AD (e.g. via AAD Connect configuration) - that ensures the result of license assignment is always correct and users do not receive services in locations that are not allowed. +> For group license assignment, any users without a usage location specified inherit the location of the directory. 
If you have users in multiple locations, we recommend that you always set usage location as part of your user creation flow in Azure AD. For example, configure Azure AD Connect to set usage location. This recommendation makes sure the result of license assignment is always correct and users do not receive services in locations that are not allowed. ## Step 1: Assign the required licenses @@ -43,6 +43,9 @@ In this example, the Azure AD organization contains a security group called **HR 1. Select a user or group, and then use the **Select** button at the bottom of the page to confirm your selection. + >[!NOTE] + >When assigning licenses to a group with service plans that have dependencies on other service plans, they must both be assigned together in the same group, otherwise the service plan with the dependency will be disabled. + 1. On the **Assign license** page, click **Assignment options**, which displays all service plans included in the two products that we selected previously. Find **Yammer Enterprise** and turn it **Off** to disable that service from the product license. Confirm by clicking **OK** at the bottom of **License options**. ![select service plans for licenses](./media/licensing-groups-assign/assignment-options.png) diff --git a/articles/active-directory/fundamentals/whats-new-archive.md b/articles/active-directory/fundamentals/whats-new-archive.md index abfbe8a598d7..5784a94f98cb 100644 --- a/articles/active-directory/fundamentals/whats-new-archive.md +++ b/articles/active-directory/fundamentals/whats-new-archive.md @@ -31,6 +31,202 @@ The What's new in Azure Active Directory? 
release notes provide information abou --- +## November 2021 + +### Tenant enablement of combined security information registration for Azure Active Directory + +**Type:** Plan for change +**Service category:** MFA +**Product capability:** Identity Security & Protection + +We previously announced in April 2020, a new combined registration experience enabling users to register authentication methods for SSPR and multi-factor authentication at the same time was generally available for existing customer to opt in. Any Azure AD tenants created after August 2020 automatically have the default experience set to combined registration. Starting 2022, Microsoft will be enabling the MFA/SSPR combined registration experience for existing customers. [Learn more](../authentication/concept-registration-mfa-sspr-combined.md). + +--- + +### Windows users will see prompts more often when switching user accounts + +**Type:** Fixed +**Service category:** Authentications (Logins) +**Product capability:** User Authentication + +A problematic interaction between Windows and a local Active Directory Federation Services (ADFS) instance can result in users attempting to sign into another account, but be silently signed into their existing account instead, with no warning. For federated IdPs such as ADFS, that support the [prompt=login](/windows-server/identity/ad-fs/operations/ad-fs-prompt-login) pattern, Azure AD will now trigger a fresh login at ADFS when a user is directed to ADFS with a login hint. This ensures that the user is signed into the account they requested, rather than being silently signed into the account they're already signed in with. + +For more information, see the [change notice](../develop/reference-breaking-changes.md). 
+ +--- + +### Public preview - Conditional Access Overview Dashboard + +**Type:** New feature +**Service category:** Conditional Access +**Product capability:** Monitoring & Reporting + +The new Conditional Access overview dashboard enables all tenants to see insights about the impact of their Conditional Access policies without requiring an Azure Monitor subscription. This built-in dashboard provides tutorials to deploy policies, a summary of the policies in your tenant, a snapshot of your policy coverage, and security recommendations. [Learn more](../conditional-access/overview.md). + +--- + +### Public preview - SSPR writeback is now available for disconnected forests using Azure AD Connect cloud sync + +**Type:** New feature +**Service category:** Azure AD Connect Cloud Sync +**Product capability:** Identity Lifecycle Management + +The Public Preview feature for Azure AD Connect Cloud Sync Password writeback provides customers the capability to writeback a user’s password changes in the cloud to the on-premises directory in real time using the lightweight Azure AD cloud provisioning agent.[Learn more](../authentication/tutorial-enable-cloud-sync-sspr-writeback.md). + +--- + +### Public preview - Conditional Access for workload identities + +**Type:** New feature +**Service category:** Conditional Access for workload identities +**Product capability:** Identity Security & Protection + +Previously, Conditional Access policies applied only to users when they access apps and services like SharePoint online or the Azure portal. This preview adds support for Conditional Access policies applied to service principals owned by the organization. You can block service principals from accessing resources from outside trusted-named locations or Azure Virtual Networks. [Learn more](../conditional-access/workload-identity.md). 
+
+---
+
+### Public preview - Extra attributes available as claims
+
+**Type:** Changed feature
+**Service category:** Enterprise Apps
+**Product capability:** SSO
+
+Several user attributes have been added to the list of attributes available to map to claims to bring attributes available in claims more in line with what is available on the user object in Microsoft Graph. New attributes include mobilePhone and ProxyAddresses. [Learn more](../develop/reference-claims-mapping-policy-type.md#table-3-valid-id-values-per-source).
+
+---
+
+### Public preview - "Session Lifetime Policies Applied" property in the sign-in logs
+
+**Type:** New feature
+**Service category:** Authentications (Logins)
+**Product capability:** Identity Security & Protection
+
+We have recently added another property to the sign-in logs called "Session Lifetime Policies Applied". This property will list all the session lifetime policies that applied to the sign-in, for example: Sign-in frequency, Remember multi-factor authentication, and Configurable token lifetime. [Learn more](../reports-monitoring/concept-sign-ins.md#authentication-details).
+
+---
+
+### Public preview - Enriched reviews on access packages in entitlement management
+
+**Type:** New feature
+**Service category:** User Access Management
+**Product capability:** Entitlement Management
+
+Entitlement Management’s enriched review experience allows even more flexibility on access package reviews. Admins can now choose what happens to access if the reviewers don't respond, provide helper information to reviewers, or decide whether a justification is necessary. [Learn more](../governance/entitlement-management-access-reviews-create.md). 
+ +--- + +### General availability - randomString and redact provisioning functions + +**Type:** New feature +**Service category:** Provisioning +**Product capability:** Outbound to SaaS Applications + + +The Azure AD Provisioning service now supports two new functions, randomString() and Redact(): +- randomString - generate a string based on the length and characters you would like to include or exclude in your string. +- redact - remove the value of the attribute from the audit and provisioning logs. [Learn more](../app-provisioning/functions-for-customizing-application-data.md#randomstring). + +--- + +### General availability - Now access review creators can select users and groups to receive notification on completion of reviews + +**Type:** New feature +**Service category:** Access Reviews +**Product capability:** Identity Governance + +Now access review creators can select users and groups to receive notification on completion of reviews. [Learn more](../governance/create-access-review.md). + +--- + +### General availability - Azure AD users can now view and report suspicious sign-ins and manage their accounts within Microsoft Authenticator + +**Type:** New feature +**Service category:** Microsoft Authenticator App +**Product capability:** Identity Security & Protection + +This feature allows Azure AD users to manage their work or school accounts within the Microsoft Authenticator app. The management features will allow users to view sign-in history and sign-in activity. Users can also report any suspicious or unfamiliar activity, change their Azure AD account passwords, and update the account's security information. + +For more information on how to use this feature visit [View and search your recent sign-in activity from the My Sign-ins page](../user-help/my-account-portal-sign-ins-page.md). 
+ +--- + +### General availability - New Microsoft Authenticator app icon + +**Type:** New feature +**Service category:** Microsoft Authenticator App +**Product capability:** Identity Security & Protection + +New updates have been made to the Microsoft Authenticator app icon. To learn more about these updates, see the [Microsoft Authenticator app](https://techcommunity.microsoft.com/t5/azure-active-directory-identity/microsoft-authenticator-app-easier-ways-to-add-or-manage/ba-p/2464408) blog post. + +--- + +### General availability - Azure AD single Sign-on and device-based Conditional Access support in Firefox on Windows 10/11 + +**Type:** New feature +**Service category:** Authentications (Logins) +**Product capability:** SSO + +We now support native single sign-on (SSO) support and device-based Conditional Access to Firefox browser on Windows 10 and Windows Server 2019 starting in Firefox version 91. [Learn more](../conditional-access/require-managed-devices.md#prerequisites). + +--- + +### New provisioning connectors in the Azure AD Application Gallery - November 2021 + +**Type:** New feature +**Service category:** App Provisioning +**Product capability:** 3rd Party Integration + +You can now automate creating, updating, and deleting user accounts for these newly integrated apps: + +- [Appaegis Isolation Access Cloud](../saas-apps/appaegis-isolation-access-cloud-provisioning-tutorial.md) +- [BenQ IAM](../saas-apps/benq-iam-provisioning-tutorial.md) +- [BIC Cloud Design](../saas-apps/bic-cloud-design-provisioning-tutorial.md) +- [Chaos](../saas-apps/chaos-provisioning-tutorial.md) +- [directprint.io](../saas-apps/directprint-io-provisioning-tutorial.md) +- [Documo](../saas-apps/documo-provisioning-tutorial.md) +- [Facebook Work Accounts](../saas-apps/facebook-work-accounts-provisioning-tutorial.md) +- [introDus Pre and Onboarding Platform](../saas-apps/introdus-pre-and-onboarding-platform-provisioning-tutorial.md) +- [Kisi Physical 
Security](../saas-apps/kisi-physical-security-provisioning-tutorial.md) +- [Klaxoon](../saas-apps/klaxoon-provisioning-tutorial.md) +- [Klaxoon SAML](../saas-apps/klaxoon-saml-provisioning-tutorial.md) +- [MX3 Diagnostics](../saas-apps/mx3-diagnostics-connector-provisioning-tutorial.md) +- [Netpresenter](../saas-apps/netpresenter-provisioning-tutorial.md) +- [Peripass](../saas-apps/peripass-provisioning-tutorial.md) +- [Real Links](../saas-apps/real-links-provisioning-tutorial.md) +- [Sentry](../saas-apps/sentry-provisioning-tutorial.md) +- [Teamgo](../saas-apps/teamgo-provisioning-tutorial.md) +- [Zero](../saas-apps/zero-provisioning-tutorial.md) + +For more information about how to better secure your organization by using automated user account provisioning, see [Automate user provisioning to SaaS applications with Azure AD](../manage-apps/user-provisioning.md). + +--- + +### New Federated Apps available in Azure AD Application gallery - November 2021 + +**Type:** New feature +**Service category:** Enterprise Apps +**Product capability:** 3rd Party Integration + +In November 2021, we have added following 32 new applications in our App gallery with Federation support: + +[Tide - Connector](https://gallery.ctinsuretech-tide.com/), [Virtual Risk Manager - USA](../saas-apps/virtual-risk-manager-usa-tutorial.md), [Xorlia Policy Management](https://app.xoralia.com/), [WorkPatterns](https://app.workpatterns.com/oauth2/login?data_source_type=office_365_account_calendar_workspace_sync&utm_source=azure_sso), [GHAE](../saas-apps/ghae-tutorial.md), [Nodetrax Project](../saas-apps/nodetrax-project-tutorial.md), [Touchstone Benchmarking](https://app.touchstonebenchmarking.com/), [SURFsecureID - Azure MFA](../saas-apps/surfsecureid-azure-mfa-tutorial.md), [AiDEA](https://truebluecorp.com/en/prodotti/aidea-en/),[R and D Tax Credit Services: 10-wk Implementation](../saas-apps/r-and-d-tax-credit-services-tutorial.md), [Mapiq Essentials](../saas-apps/mapiq-essentials-tutorial.md), 
[Celtra Authentication Service](https://auth.celtra.com/login), [Compete HR](https://app.competewith.com/auth/login), [Snackmagic](../saas-apps/snackmagic-tutorial.md), [FileOrbis](../saas-apps/fileorbis-tutorial.md), [ClarivateWOS](../saas-apps/clarivatewos-tutorial.md), [RewardCo Engagement Cloud](https://cloud.live.rewardco.com/oauth/login), [ZoneVu](https://zonevu.ubiterra.com/onboarding/index), [V-Client](../saas-apps/v-client-tutorial.md), [Netpresenter Next](https://www.netpresenter.com/), [UserTesting](../saas-apps/usertesting-tutorial.md), [InfinityQS ProFicient on Demand](../saas-apps/infinityqs-proficient-on-demand-tutorial.md), [Feedonomics](https://auth.feedonomics.com/), [Customer Voice](https://cx.pobuca.com/), [Zanders Inside](https://home.zandersinside.com/), [Connecter](https://teamwork.connecterapp.com/azure_login), [Paychex Flex](https://login.flex.paychex.com/azfed-app/v1/azure/federation/admin), [InsightSquared](https://us2.insightsquared.com/#/boards/office365.com/settings/userconnection), [Kiteline Health](https://my.kitelinehealth.com/), [Fabrikam Enterprise Managed User (OIDC)](https://github.com/login), [PROXESS for Office365](https://www.proxess.de/office365), [Coverity Static Application Security Testing](../saas-apps/coverity-static-application-security-testing-tutorial.md) + +You can also find the documentation of all the applications [here](../saas-apps/tutorial-list.md). + +For listing your application in the Azure AD app gallery, read the details [here](../manage-apps/v2-howto-app-gallery-listing.md). + +--- + +### Updated "switch organizations" user experience in My Account. + +**Type:** Changed feature +**Service category:** My Profile/Account +**Product capability:** End User Experiences + +Updated "switch organizations" user interface in My Account. This visually improves the UI and provides the end-user with clear instructions. Added a manage organizations link to blade per customer feedback. 
[Learn more](https://support.microsoft.com/account-billing/switch-organizations-in-your-work-or-school-account-portals-c54c32c9-2f62-4fad-8c23-2825ed49d146). + +--- + ## October 2021 ### Limits on the number of configured API permissions for an application registration will be enforced starting in October 2021 diff --git a/articles/active-directory/fundamentals/whats-new.md b/articles/active-directory/fundamentals/whats-new.md index 247970621f60..1db1d9edcea8 100644 --- a/articles/active-directory/fundamentals/whats-new.md +++ b/articles/active-directory/fundamentals/whats-new.md @@ -31,6 +31,165 @@ Azure AD receives improvements on an ongoing basis. To stay up to date with the This page is updated monthly, so revisit it regularly. If you're looking for items older than six months, you can find them in [Archive for What's new in Azure Active Directory](whats-new-archive.md). +## May 2022 + +### General Availability: Tenant-based service outage notifications + +**Type:** Plan for change +**Service category:** Other +**Product capability:** Platform + + +Azure Service Health will soon support service outage notifications to Tenant Admins for Azure Active Directory issues in the near future. These outages will also appear on the Azure AD admin portal overview page with appropriate links to Azure Service Health. Outage events will be able to be seen by built-in Tenant Administrator Roles. We will continue to send outage notifications to subscriptions within a tenant for a period of transition. More information will be available when this capability is released. The expected release is for June 2022. 
+ +--- + + + +### New Federated Apps available in Azure AD Application gallery - May 2022 + +**Type:** New feature +**Service category:** Enterprise Apps +**Product capability:** 3rd Party Integration + + + +In May 2022 we've added the following 25 new applications in our App gallery with Federation support: + +[UserZoom](../saas-apps/userzoom-tutorial.md), [AMX Mobile](https://www.amxsolutions.co.uk/), [i-Sight](../saas-apps/isight-tutorial.md), [Method InSight](https://digital.methodrecycling.com/), [Chronus SAML](../saas-apps/chronus-saml-tutorial.md), [Attendant Console for Microsoft Teams](https://attendant.anywhere365.io/), [Skopenow](../saas-apps/skopenow-tutorial.md), [Fidelity PlanViewer](../saas-apps/fidelity-planviewer-tutorial.md), [Lyve Cloud](../saas-apps/lyve-cloud-tutorial.md), [Framer](../saas-apps/framer-tutorial.md), [Authomize](../saas-apps/authomize-tutorial.md), [gamba!](../saas-apps/gamba-tutorial.md), [Datto File Protection Single Sign On](../saas-apps/datto-file-protection-tutorial.md), [LONEALERT](https://portal.lonealert.co.uk/auth/azure/saml/signin), [Payfactors](https://pf.payfactors.com/client/auth/login), [deBroome Brand Portal](../saas-apps/debroome-brand-portal-tutorial.md), [TeamSlide](../saas-apps/teamslide-tutorial.md), [Sensera Systems](https://sitecloud.senserasystems.com/), [YEAP](https://prismaonline.propay.be/logon/login.aspx), [Monaca Education](https://monaca.education/ja/signup), [Personify Inc](https://personifyinc.com/login), [Phenom TXM](../saas-apps/phenom-txm-tutorial.md), [Forcepoint Cloud Security Gateway - User Authentication](../saas-apps/forcepoint-cloud-security-gateway-tutorial.md), [GoalQuest](../saas-apps/goalquest-tutorial.md), [OpenForms](https://login.openforms.com/Login). 
+ +You can also find the documentation of all the applications from here https://aka.ms/AppsTutorial, + +For listing your application in the Azure AD app gallery, please read the details here https://aka.ms/AzureADAppRequest + + + + + +--- + + +### General Availability – My Apps users can make apps from URLs (add sites) + +**Type:** New feature +**Service category:** My Apps +**Product capability:** End User Experiences + + +When editing a collection using the My Apps portal, users can now add their own sites, in addition to adding apps that have been assigned to them by an admin. To add a site, users must provide a name and URL. For more information on how to use this feature, see: [Customize app collections in the My Apps portal](https://support.microsoft.com/account-billing/customize-app-collections-in-the-my-apps-portal-2dae6b8a-d8b0-4a16-9a5d-71ed4d6a6c1d). + + +--- + + +### Public preview - New provisioning connectors in the Azure AD Application Gallery - May 2022 + +**Type:** New feature +**Service category:** App Provisioning +**Product capability:** 3rd Party Integration + + +You can now automate creating, updating, and deleting user accounts for these newly integrated apps: + +- [Alinto Protect](../saas-apps/alinto-protect-provisioning-tutorial.md) +- [Blinq](../saas-apps/blinq-provisioning-tutorial.md) +- [Cerby](../saas-apps/cerby-provisioning-tutorial.md) + +For more information about how to better secure your organization by using automated user account provisioning, see: [Automate user provisioning to SaaS applications with Azure AD](../app-provisioning/user-provisioning.md). + + +--- + + +### Public Preview: Confirm safe and compromised in signIns API beta + +**Type:** New feature +**Service category:** Identity Protection +**Product capability:** Identity Security & Protection + + +The signIns Microsoft Graph API now supports confirming safe and compromised on risky sign-ins. This public preview functionality is available at the beta endpoint. 
For more information, please check out the Microsoft Graph documentation: [signIn: confirmSafe - Microsoft Graph beta | Microsoft Docs](/graph/api/signin-confirmsafe?view=graph-rest-beta&preserve-view=true)
+
+
+---
+
+
+### Public Preview of Microsoft cloud settings for Azure AD B2B
+
+**Type:** New feature
+**Service category:** B2B
+**Product capability:** B2B/B2C
+**Clouds impacted:** China;Public (M365,GCC);US Gov (GCC-H, DoD)
+
+
+Microsoft cloud settings let you collaborate with organizations from different Microsoft Azure clouds. With Microsoft cloud settings, you can establish mutual B2B collaboration between the following clouds:
+
+- Microsoft Azure global cloud and Microsoft Azure Government
+- Microsoft Azure global cloud and Microsoft Azure China 21Vianet
+
+To learn more about Microsoft cloud settings for B2B collaboration, see: [Cross-tenant access overview - Azure AD | Microsoft Docs](../external-identities/cross-tenant-access-overview.md#microsoft-cloud-settings).
+
+
+---
+
+
+### General Availability of SAML and WS-Fed federation in External Identities
+
+**Type:** Changed feature
+**Service category:** B2B
+**Product capability:** B2B/B2C
+**Clouds impacted:** Public (M365,GCC);US Gov (GCC-H, DoD)
+
+
+When setting up federation with a partner's IdP, new guest users from that domain can use their own IdP-managed organizational account to sign in to your Azure AD tenant and start collaborating with you. There's no need for the guest user to create a separate Azure AD account. To learn more about federating with SAML or WS-Fed identity providers in External Identities, see: [Federation with a SAML/WS-Fed identity provider (IdP) for B2B - Azure AD | Microsoft Docs](../external-identities/direct-federation.md). 
+ + +--- + + +### Public Preview - Create Group in Administrative Unit + +**Type:** Changed feature +**Service category:** Directory Management +**Product capability:** Access Control +**Clouds impacted:** China;Public (M365,GCC);US Gov (GCC-H, DoD) + + +Groups Administrators assigned over the scope of an administrative unit can now create groups within the administrative unit. This enables scoped group administrators to create groups that they can manage directly, without needing to elevate to Global Administrator or Privileged Role Administrator. For more information, see: [Administrative units in Azure Active Directory](../roles/administrative-units.md). + + +--- + + +### Public Preview - Dynamic administrative unit support for onPremisesDistinguishedName property + +**Type:** Changed feature +**Service category:** Directory Management +**Product capability:** AuthZ/Access Delegation +**Clouds impacted:** Public (M365,GCC) + + +The public preview of dynamic administrative units now supports the **onPremisesDistinguishedName** property for users. This makes it possible to create dynamic rules which incorporate the organizational unit of the user from on-premises AD. For more information, see: [Manage users or devices for an administrative unit with dynamic membership rules (Preview)](../roles/admin-units-members-dynamic.md). + + +--- + + +### General Availability - Improvements to Azure AD Smart Lockout + +**Type:** Changed feature +**Service category:** Other +**Product capability:** User Management +**Clouds impacted:** China;Public (M365,GCC);US Gov (GCC-H, DoD);US Nat;US Sec + + +Smart Lockout now synchronizes the lockout state across Azure AD data centers, so the total number of failed sign-in attempts allowed before an account is locked out will match the configured lockout threshold. For more information, see: [Protect user accounts from attacks with Azure Active Directory smart lockout](../authentication/howto-password-smart-lockout.md). 
+ + +--- + + + ## April 2022 ### General Availability - Microsoft Defender for Endpoint Signal in Identity Protection @@ -147,7 +306,7 @@ We highly recommend enabling this new protection when using Azure AD Multi-Facto **Service category:** Enterprise Apps **Product capability:** Third Party Integration -In April 2022 we added the following 24 new applications in our App gallery with Federation support +In April 2022 we added the following 24 new applications in our App gallery with Federation support: [X-1FBO](https://www.x1fbo.com/), [select Armor](https://app.clickarmor.ca/), [Smint.io Portals for SharePoint](https://www.smint.io/portals-for-sharepoint/), [Pluto](../saas-apps/pluto-tutorial.md), [ADEM](../saas-apps/adem-tutorial.md), [Smart360](../saas-apps/smart360-tutorial.md), [MessageWatcher SSO](https://messagewatcher.com/), [Beatrust](../saas-apps/beatrust-tutorial.md), [AeyeScan](https://aeyescan.com/azure_sso), [ABa Customer](https://abacustomer.com/), [Twilio Sendgrid](../saas-apps/twilio-sendgrid-tutorial.md), [Vault Platform](../saas-apps/vault-platform-tutorial.md), [Speexx](../saas-apps/speexx-tutorial.md), [Clicksign](https://app.clicksign.com/signin), [Per Angusta](../saas-apps/per-angusta-tutorial.md), [EruditAI](https://dashboard.erudit.ai/login), [MetaMoJi ClassRoom](https://business.metamoji.com/), [Numici](https://app.numici.com/), [MCB.CLOUD](https://identity.mcb.cloud/Identity/Account/Manage), [DepositLink](https://depositlink.com/external-login), [Last9](https://auth.last9.io/auth), [ParkHere Corporate](../saas-apps/parkhere-corporate-tutorial.md), [Keepabl](../saas-apps/keepabl-tutorial.md), [Swit](../saas-apps/swit-tutorial.md) You can also find the documentation of all the applications from here https://aka.ms/AppsTutorial. 
diff --git a/articles/active-directory/identity-protection/howto-identity-protection-remediate-unblock.md b/articles/active-directory/identity-protection/howto-identity-protection-remediate-unblock.md index 685bdfcdeb86..452a2629c7e7 100644 --- a/articles/active-directory/identity-protection/howto-identity-protection-remediate-unblock.md +++ b/articles/active-directory/identity-protection/howto-identity-protection-remediate-unblock.md @@ -67,6 +67,8 @@ If a password reset isn't an option for you, you can choose to dismiss user risk When you select **Dismiss user risk**, all events are closed and the affected user is no longer at risk. However, because this method doesn't have an impact on the existing password, it doesn't bring the related identity back into a safe state. +To **Dismiss user risk**, search for and select **Azure AD Risky users**, select the affected user, and select **Dismiss user(s) risk**. + ### Close individual risk detections manually You can close individual risk detections manually. By closing risk detections manually, you can lower the user risk level. Typically, risk detections are closed manually in response to a related investigation. For example, when talking to a user reveals that an active risk detection isn't required anymore. 
diff --git a/articles/active-directory/managed-identities-azure-resources/TOC.yml b/articles/active-directory/managed-identities-azure-resources/TOC.yml index 8e9cece892b3..b664b589be2d 100644 --- a/articles/active-directory/managed-identities-azure-resources/TOC.yml +++ b/articles/active-directory/managed-identities-azure-resources/TOC.yml @@ -76,6 +76,8 @@ href: qs-configure-rest-vm.md - name: Azure SDKs href: qs-configure-sdk-windows-vm.md + - name: Using Azure Policy + href: how-to-assign-managed-identity-via-azure-policy.md - name: Configure managed identities on virtual machine scale sets items: @@ -89,6 +91,8 @@ href: qs-configure-template-windows-vmss.md - name: REST href: qs-configure-rest-vmss.md + - name: Using Azure Policy + href: how-to-assign-managed-identity-via-azure-policy.md - name: Use managed identities on VMs items: @@ -105,9 +109,7 @@ - name: CLI href: howto-assign-access-cli.md - name: PowerShell - href: howto-assign-access-powershell.md - - name: Using Azure Policy - href: how-to-assign-managed-identity-via-azure-policy.md + href: howto-assign-access-powershell.md - name: Manage user-assigned managed identities href: how-manage-user-assigned-managed-identities.md diff --git a/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md b/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md index 14e3be588b5b..d6cb49d9eede 100644 --- a/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md +++ b/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md @@ -10,7 +10,7 @@ ms.topic: how-to ms.tgt_pltfrm: na ms.workload: identity ms.subservice: pim -ms.date: 05/24/2022 +ms.date: 10/07/2021 ms.author: curtand ms.reviewer: shaunliu ms.custom: pim @@ -31,15 +31,11 @@ Select an alert to see a report that lists the users or roles that triggered the ## Alerts -Alert | Severity | Trigger | 
Recommendation ---- | --- | --- | --- -**Too many owners assigned to a resource** |Medium |Too many users have the owner role. |Review the users in the list and reassign some to less privileged roles. -**Too many permanent owners assigned to a resource** |Medium |Too many users are permanently assigned to a role. |Review the users in the list and re-assign some to require activation for role use. -**Duplicate role created** |Medium |Multiple roles have the same criteria. |Use only one of these roles. -**Roles are being assigned outside of Privileged Identity Management (Preview)** | High | A role is managed directly through the Azure IAM resource blade or the Azure Resource Manager API | Review the users in the list and remove them from privileged roles assigned outside of Privilege Identity Management. - -> [!Note] -> During the public preview of the **Roles are being assigned outside of Privileged Identity Management (Preview)** alert, Microsoft supports only permissions that are assigned at the subscription level. +| Alert | Severity | Trigger | Recommendation | +| --- | --- | --- | --- | +| **Too many owners assigned to a resource** |Medium |Too many users have the owner role. |Review the users in the list and reassign some to less privileged roles. | +| **Too many permanent owners assigned to a resource** |Medium |Too many users are permanently assigned to a role. |Review the users in the list and re-assign some to require activation for role use. | +| **Duplicate role created** |Medium |Multiple roles have the same criteria. |Use only one of these roles. 
| ### Severity diff --git a/articles/active-directory/roles/security-planning.md b/articles/active-directory/roles/security-planning.md index 82e5b4ebba39..83992b5189cb 100644 --- a/articles/active-directory/roles/security-planning.md +++ b/articles/active-directory/roles/security-planning.md @@ -252,7 +252,7 @@ Attackers might try to target privileged accounts so that they can disrupt the i * Impersonation attacks * Credential theft attacks such as keystroke logging, Pass-the-Hash, and Pass-The-Ticket -By deploying privileged access workstations, you can reduce the risk that administrators enter their credentials in a desktop environment that hasn't been hardened. For more information, see [Privileged Access Workstations](https://4sysops.com/archives/understand-the-microsoft-privileged-access-workstation-paw-security-model/). +By deploying privileged access workstations, you can reduce the risk that administrators enter their credentials in a desktop environment that hasn't been hardened. For more information, see [Privileged Access Workstations](/security/compass/overview). #### Review National Institute of Standards and Technology recommendations for handling incidents diff --git a/articles/active-directory/saas-apps/battery-management-information-system-tutorial.md b/articles/active-directory/saas-apps/battery-management-information-system-tutorial.md new file mode 100644 index 000000000000..3c3fceb466e9 --- /dev/null +++ b/articles/active-directory/saas-apps/battery-management-information-system-tutorial.md @@ -0,0 +1,139 @@ +--- +title: 'Tutorial: Azure AD SSO integration with BMIS - Battery Management Information System' +description: Learn how to configure single sign-on between Azure Active Directory and BMIS - Battery Management Information System. 
+services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/27/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with BMIS - Battery Management Information System + +In this tutorial, you'll learn how to integrate BMIS - Battery Management Information System with Azure Active Directory (Azure AD). When you integrate BMIS - Battery Management Information System with Azure AD, you can: + +* Control in Azure AD who has access to BMIS - Battery Management Information System. +* Enable your users to be automatically signed-in to BMIS - Battery Management Information System with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* BMIS - Battery Management Information System single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* BMIS - Battery Management Information System supports **IDP** initiated SSO. + +## Add BMIS - Battery Management Information System from the gallery + +To configure the integration of BMIS - Battery Management Information System into Azure AD, you need to add BMIS - Battery Management Information System from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. 
+1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **BMIS - Battery Management Information System** in the search box. +1. Select **BMIS - Battery Management Information System** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for BMIS - Battery Management Information System + +Configure and test Azure AD SSO with BMIS - Battery Management Information System using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in BMIS - Battery Management Information System. + +To configure and test Azure AD SSO with BMIS - Battery Management Information System, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure BMIS - Battery Management Information System SSO](#configure-bmis---battery-management-information-system-sso)** - to configure the single sign-on settings on application side. + 1. **[Create BMIS - Battery Management Information System test user](#create-bmis---battery-management-information-system-test-user)** - to have a counterpart of B.Simon in BMIS - Battery Management Information System that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. 
In the Azure portal, on the **BMIS - Battery Management Information System** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic SAML Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, the application is pre-configured and the necessary URLs are already pre-populated with Azure. The user needs to save the configuration by clicking the **Save** button. + +1. BMIS - Battery Management Information System application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. + + ![Screenshot shows the Battery Management Information System application image.](common/default-attributes.png "Image") + +1. In addition to above, BMIS - Battery Management Information System application expects few more attributes to be passed back in SAML response, which are shown below. These attributes are also pre populated but you can review them as per your requirement. + + | Name | Source Attribute | + |-------| --------- | + | email | user.mail | + | first_name | user.givenname | + | last_name | user.surname | + | user_name | user.mail | + +1. On the **Set-up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Federation Metadata XML** and select **Download** to download the certificate and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") + +1. On the **Set up BMIS - Battery Management Information System** section, copy the appropriate URL(s) based on your requirement. 
+ + ![Screenshot shows to copy configuration appropriate URLs.](common/copy-configuration-urls.png "Attributes") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to BMIS - Battery Management Information System. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **BMIS - Battery Management Information System**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. 
+ +## Configure BMIS - Battery Management Information System SSO + +To configure single sign-on on the **BMIS - Battery Management Information System** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [BMIS - Battery Management Information System support team](mailto:bmissupport@midtronics.com). They configure this setting so that the SAML SSO connection is set properly on both sides. + +### Create BMIS - Battery Management Information System test user + +In this section, you create a user called Britta Simon in BMIS - Battery Management Information System. Work with [BMIS - Battery Management Information System support team](mailto:bmissupport@midtronics.com) to add the users in the BMIS - Battery Management Information System platform. Users must be created and activated before you use single sign-on. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with the following options. + +* Click on **Test this application** in the Azure portal and you should be automatically signed in to the BMIS - Battery Management Information System for which you set up the SSO. + +* You can use Microsoft My Apps. When you click the BMIS - Battery Management Information System tile in the My Apps, you should be automatically signed in to the BMIS - Battery Management Information System for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). + +## Next steps + +Once you configure BMIS - Battery Management Information System, you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad).
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/e2open-lsp-tutorial.md b/articles/active-directory/saas-apps/e2open-lsp-tutorial.md new file mode 100644 index 000000000000..2eb7773c88d2 --- /dev/null +++ b/articles/active-directory/saas-apps/e2open-lsp-tutorial.md @@ -0,0 +1,136 @@ +--- +title: 'Tutorial: Azure AD SSO integration with E2open LSP' +description: Learn how to configure single sign-on between Azure Active Directory and E2open LSP. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/23/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with E2open LSP + +In this tutorial, you'll learn how to integrate E2open LSP with Azure Active Directory (Azure AD). When you integrate E2open LSP with Azure AD, you can: + +* Control in Azure AD who has access to E2open LSP. +* Enable your users to be automatically signed-in to E2open LSP with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* E2open LSP single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* E2open LSP supports **SP** initiated SSO. + +## Add E2open LSP from the gallery + +To configure the integration of E2open LSP into Azure AD, you need to add E2open LSP from the gallery to your list of managed SaaS apps. + +1. 
Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **E2open LSP** in the search box. +1. Select **E2open LSP** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for E2open LSP + +Configure and test Azure AD SSO with E2open LSP using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in E2open LSP. + +To configure and test Azure AD SSO with E2open LSP, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure E2open LSP SSO](#configure-e2open-lsp-sso)** - to configure the single sign-on settings on application side. + 1. **[Create E2open LSP test user](#create-e2open-lsp-test-user)** - to have a counterpart of B.Simon in E2open LSP that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **E2open LSP** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. 
On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, perform the following steps: + + a. In the **Identifier** text box, type a URL using the following pattern: + `https://-.tms-lsp.blujaysolutions.net/navi/saml/metadata` + + b. In the **Reply URL** text box, type a URL using the following pattern: + `https://-.tms-lsp.blujaysolutions.net/navi/sam` + + c. In the **Sign-on URL** text box, type a URL using the following pattern: + `https://-.tms-lsp.blujaysolutions.net/navi/` + + > [!NOTE] + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign-on URL. Contact [E2open LSP Client support team](mailto:customersupport@e2open.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + +1. On the **Set up single sign-on with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/copy-metadataurl.png "Certificate") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. 
+ +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to E2open LSP. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **E2open LSP**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure E2open LSP SSO + +To configure single sign-on on the **E2open LSP** side, you need to send the **App Federation Metadata Url** to [E2open LSP support team](mailto:customersupport@e2open.com). They configure this setting so that the SAML SSO connection is set properly on both sides. + +### Create E2open LSP test user + +In this section, you create a user called Britta Simon in E2open LSP. Work with [E2open LSP support team](mailto:customersupport@e2open.com) to add the users in the E2open LSP platform. Users must be created and activated before you use single sign-on. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with the following options. + +* Click on **Test this application** in the Azure portal. This will redirect to the E2open LSP Sign-on URL where you can initiate the login flow. + +* Go to the E2open LSP Sign-on URL directly and initiate the login flow from there. + +* You can use Microsoft My Apps. When you click the E2open LSP tile in the My Apps, this will redirect to the E2open LSP Sign-on URL.
For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). + +## Next steps + +Once you configure E2open LSP you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/forcepoint-cloud-security-gateway-tutorial.md b/articles/active-directory/saas-apps/forcepoint-cloud-security-gateway-tutorial.md index fdb779149a13..85abe932b97e 100644 --- a/articles/active-directory/saas-apps/forcepoint-cloud-security-gateway-tutorial.md +++ b/articles/active-directory/saas-apps/forcepoint-cloud-security-gateway-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 04/19/2022 +ms.date: 05/26/2022 ms.author: jeedes --- @@ -72,7 +72,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, perform the following steps: @@ -85,13 +85,13 @@ Follow these steps to enable Azure AD SSO in the Azure portal. c. In the **Sign-on URL** text box, type the URL: `https://mailcontrol.com` -1. 
On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. +1. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 1. On the **Set up Forcepoint Cloud Security Gateway - User Authentication** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Authentication") ### Create an Azure AD test user @@ -131,7 +131,7 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a b. Select **Identity provider** from the dropdown. - c. Open the downloaded **Certificate (Base64)** from the Azure portal and upload the file into the **File upload** textbox by clicking **Browse** option. + c. Upload the **Federation Metadata XML** file from the Azure portal into the **File upload** textbox by clicking **Browse** option. d. Click **Save**. diff --git a/articles/active-directory/saas-apps/github-ae-tutorial.md b/articles/active-directory/saas-apps/github-ae-tutorial.md index d4889f0696b6..12802e940cd9 100644 --- a/articles/active-directory/saas-apps/github-ae-tutorial.md +++ b/articles/active-directory/saas-apps/github-ae-tutorial.md @@ -1,6 +1,6 @@ --- -title: 'Tutorial: Azure Active Directory single sign-on (SSO) integration with GitHub AE | Microsoft Docs' -description: Learn how to configure single sign-on between Azure Active Directory and GitHub AE. 
+title: 'Tutorial: Azure AD SSO integration with GitHub Enterprise Server' +description: Learn how to configure single sign-on between Azure Active Directory and GitHub Enterprise Server. services: active-directory author: jeevansd manager: CelesteDG @@ -9,16 +9,16 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 08/31/2021 +ms.date: 05/20/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory single sign-on (SSO) integration with GitHub AE +# Tutorial: Azure AD SSO integration with GitHub Enterprise Server -In this tutorial, you'll learn how to integrate GitHub AE with Azure Active Directory (Azure AD). When you integrate GitHub AE with Azure AD, you can: +In this tutorial, you'll learn how to integrate GitHub Enterprise Server with Azure Active Directory (Azure AD). When you integrate GitHub Enterprise Server with Azure AD, you can: -* Control in Azure AD who has access to GitHub AE. -* Enable your users to be automatically signed-in to GitHub AE with their Azure AD accounts. +* Control in Azure AD who has access to GitHub Enterprise Server. +* Enable your users to be automatically signed-in to GitHub Enterprise Server with their Azure AD accounts. * Manage your accounts in one central location - the Azure portal. ## Prerequisites @@ -26,52 +26,53 @@ In this tutorial, you'll learn how to integrate GitHub AE with Azure Active Dire To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). -* GitHub AE, ready for [initialization](https://docs.github.com/github-ae@latest/admin/configuration/initializing-github-ae). +* GitHub Enterprise Server, ready for [initialization](https://docs.github.com/github-ae@latest/admin/configuration/initializing-github-ae). +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. 
+For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. -* GitHub AE supports **SP** and **IDP** initiated SSO. -* GitHub AE supports **Just In Time** user provisioning. -* GitHub AE supports [Automated user provisioning](github-ae-provisioning-tutorial.md). +* GitHub Enterprise Server supports **SP** and **IDP** initiated SSO. +* GitHub Enterprise Server supports **Just In Time** user provisioning. +* GitHub Enterprise Server supports [Automated user provisioning](github-ae-provisioning-tutorial.md). -## Adding GitHub AE from the gallery +## Adding GitHub Enterprise Server from the gallery -To configure the integration of GitHub AE into Azure AD, you need to add GitHub AE from the gallery to your list of managed SaaS apps. +To configure the integration of GitHub Enterprise Server into Azure AD, you need to add GitHub Enterprise Server from the gallery to your list of managed SaaS apps. 1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. 1. On the left navigation pane, select the **Azure Active Directory** service. 1. Navigate to **Enterprise Applications** and then select **All Applications**. 1. To add new application, select **New application**. -1. In the **Add from the gallery** section, type **GitHub AE** in the search box. -1. Select **GitHub AE** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. +1. In the **Add from the gallery** section, type **GitHub Enterprise Server** in the search box. +1. Select **GitHub Enterprise Server** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. 
+## Configure and test Azure AD SSO for GitHub Enterprise Server -## Configure and test Azure AD SSO for GitHub AE +Configure and test Azure AD SSO with GitHub Enterprise Server using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in GitHub Enterprise Server. -Configure and test Azure AD SSO with GitHub AE using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in GitHub AE. - -To configure and test Azure AD SSO with GitHub AE, complete the following building blocks: +To configure and test Azure AD SSO with GitHub Enterprise Server, perform the following steps: 1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. -1. **[Configure GitHub AE SSO](#configure-github-ae-sso)** - to configure the single sign-on settings on application side. - 1. **[Create GitHub AE test user](#create-github-ae-test-user)** - to have a counterpart of B.Simon in GitHub AE that is linked to the Azure AD representation of user. +1. **[Configure GitHub Enterprise Server SSO](#configure-github-enterprise-server-sso)** - to configure the single sign-on settings on application side. + 1. **[Create GitHub Enterprise Server test user](#create-github-enterprise-server-test-user)** - to have a counterpart of B.Simon in GitHub Enterprise Server that is linked to the Azure AD representation of user. 1. **[Test SSO](#test-sso)** - to verify whether the configuration works. ## Configure Azure AD SSO Follow these steps to enable Azure AD SSO in the Azure portal. -1. 
In the Azure portal, on the **GitHub AE** application integration page, find the **Manage** section and select **single sign-on**. +1. In the Azure portal, on the **GitHub Enterprise Server** application integration page, find the **Manage** section and select **single sign-on**. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") -1. On the **Basic SAML Configuration** section, if you wish to configure the application in **IDP** initiated mode, enter the values for the following fields: +1. On the **Basic SAML Configuration** section, if you wish to configure the application in **IDP** initiated mode, perform the following steps: a. In the **Identifier (Entity ID)** text box, type a URL using the following pattern: `https://` @@ -85,12 +86,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. `https:///sso` > [!NOTE] - > These values are not real. Update these values with the actual Sign on URL, Reply URL and Identifier. Contact [GitHub AE Client support team](mailto:support@github.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. - + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign on URL. Contact [GitHub Enterprise Server Client support team](mailto:support@github.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. -1. GitHub AE application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. 
The following screenshot shows the list of default attributes. +1. GitHub Enterprise Server application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. - ![image](common/default-attributes.png) + ![Screenshot shows the image of Enterprise Server application.](common/default-attributes.png "Attributes") 1. Edit **User Attributes & Claims**. @@ -104,18 +104,18 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. Click **Save**. - ![manage claim](./media/github-ae-tutorial/administrator.png) + ![Screenshot shows to manage claim for attributes.](./media/github-ae-tutorial/administrator.png "Claims") > [!NOTE] > To know the instructions on how to add a claim, please follow the [link](https://docs.github.com/en/github-ae@latest/admin/authentication/configuring-authentication-and-provisioning-for-your-enterprise-using-azure-ad). 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") -1. On the **Set up GitHub AE** section, copy the appropriate URL(s) based on your requirement. +1. On the **Set up GitHub Enterprise Server** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Metadata") ### Create an Azure AD test user @@ -131,25 +131,25 @@ In this section, you'll create a test user in the Azure portal called B.Simon. 
### Assign the Azure AD test user -In this section, you'll enable B.Simon to use Azure single sign-on by granting access to GitHub AE. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to GitHub Enterprise Server. 1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. -1. In the applications list, select **GitHub AE**. +1. In the applications list, select **GitHub Enterprise Server**. 1. In the app's overview page, find the **Manage** section and select **Users and groups**. 1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. 1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. 1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. 1. In the **Add Assignment** dialog, click the **Assign** button. -## Configure GitHub AE SSO +## Configure GitHub Enterprise Server SSO -To configure SSO on GitHub AE side, you need to follow the instructions mentioned [here](https://docs.github.com/github-ae@latest/admin/authentication/configuring-saml-single-sign-on-for-your-enterprise#enabling-saml-sso). +To configure SSO on GitHub Enterprise Server side, you need to follow the instructions mentioned [here](https://docs.github.com/github-ae@latest/admin/authentication/configuring-saml-single-sign-on-for-your-enterprise#enabling-saml-sso). -### Create GitHub AE test user +### Create GitHub Enterprise Server test user -In this section, a user called B.Simon is created in GitHub AE. GitHub AE supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in GitHub AE, a new one is created after authentication. 
+In this section, a user called B.Simon is created in GitHub Enterprise Server. GitHub Enterprise Server supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in GitHub Enterprise Server, a new one is created after authentication. -GitHub AE also supports automatic user provisioning, you can find more details [here](./github-ae-provisioning-tutorial.md) on how to configure automatic user provisioning. +GitHub Enterprise Server also supports automatic user provisioning, you can find more details [here](./github-ae-provisioning-tutorial.md) on how to configure automatic user provisioning. ## Test SSO @@ -157,18 +157,18 @@ In this section, you test your Azure AD single sign-on configuration with follow #### SP initiated: -* Click on **Test this application** in Azure portal. This will redirect to GitHub AE Sign on URL where you can initiate the login flow. +* Click on **Test this application** in Azure portal. This will redirect to GitHub Enterprise Server Sign on URL where you can initiate the login flow. -* Go to GitHub AE Sign-on URL directly and initiate the login flow from there. +* Go to GitHub Enterprise Server Sign-on URL directly and initiate the login flow from there. #### IDP initiated: -* Click on **Test this application** in Azure portal and you should be automatically signed in to the GitHub AE for which you set up the SSO +* Click on **Test this application** in Azure portal and you should be automatically signed in to the GitHub Enterprise Server for which you set up the SSO. -You can also use Microsoft My Apps to test the application in any mode. When you click the GitHub AE tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the GitHub AE for which you set up the SSO. 
For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). +You can also use Microsoft My Apps to test the application in any mode. When you click the GitHub Enterprise Server tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the GitHub Enterprise Server for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). ## Next steps * [Configuring user provisioning for your enterprise](https://docs.github.com/github-ae@latest/admin/authentication/configuring-user-provisioning-for-your-enterprise). -* Once you configure GitHub AE you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). +* Once you configure GitHub Enterprise Server you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). 
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/media/standard-for-success-tutorial/name.png b/articles/active-directory/saas-apps/media/standard-for-success-tutorial/name.png new file mode 100644 index 000000000000..eb660314b9fe Binary files /dev/null and b/articles/active-directory/saas-apps/media/standard-for-success-tutorial/name.png differ diff --git a/articles/active-directory/saas-apps/media/standard-for-success-tutorial/settings.png b/articles/active-directory/saas-apps/media/standard-for-success-tutorial/settings.png new file mode 100644 index 000000000000..a6a25b8ff105 Binary files /dev/null and b/articles/active-directory/saas-apps/media/standard-for-success-tutorial/settings.png differ diff --git a/articles/active-directory/saas-apps/s4-digitsec-tutorial.md b/articles/active-directory/saas-apps/s4-digitsec-tutorial.md new file mode 100644 index 000000000000..732170a416bb --- /dev/null +++ b/articles/active-directory/saas-apps/s4-digitsec-tutorial.md @@ -0,0 +1,142 @@ +--- +title: 'Tutorial: Azure AD SSO integration with S4 - Digitsec' +description: Learn how to configure single sign-on between Azure Active Directory and S4 - Digitsec. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: celested +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/23/2022 +ms.author: jeedes +--- + +# Tutorial: Azure AD SSO integration with S4 - Digitsec + +In this tutorial, you'll learn how to integrate S4 - Digitsec with Azure Active Directory (Azure AD). When you integrate S4 - Digitsec with Azure AD, you can: + +* Control in Azure AD who has access to S4 - Digitsec. +* Enable your users to be automatically signed-in to S4 - Digitsec with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. 
If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* S4 - Digitsec single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* S4 - Digitsec supports **SP and IDP** initiated SSO. +* S4 - Digitsec supports **Just In Time** user provisioning. + +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. + +## Add S4 - Digitsec from the gallery + +To configure the integration of S4 - Digitsec into Azure AD, you need to add S4 - Digitsec from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **S4 - Digitsec** in the search box. +1. Select **S4 - Digitsec** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for S4 - Digitsec + +Configure and test Azure AD SSO with S4 - Digitsec using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in S4 - Digitsec. + +To configure and test Azure AD SSO with S4 - Digitsec, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. 
**[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure S4 - Digitsec SSO](#configure-s4---digitsec-sso)** - to configure the single sign-on settings on application side. + 1. **[Create S4 - Digitsec test user](#create-s4---digitsec-test-user)** - to have a counterpart of B.Simon in S4 - Digitsec that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **S4 - Digitsec** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. + +1. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: + + In the **Sign-on URL** text box, type the URL: + `https://s4.digitsec.com` + +1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") + +1. On the **Set up S4 - Digitsec** section, copy the appropriate URL(s) based on your requirement. 
+ + ![Screenshot shows to copy configuration appropriate U R L.](common/copy-configuration-urls.png "Attributes") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to S4 - Digitsec. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **S4 - Digitsec**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you're expecting any role value in the SAML assertion, in the **Select Role** dialog, select the appropriate role for the user from the list and then click the **Select** button at the bottom of the screen. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure S4 - Digitsec SSO + +To configure single sign-on on S4 - Digitsec side, you need to send the downloaded **Certificate (Base64)** and appropriate copied URLs from Azure portal to [S4 - Digitsec support team](mailto:Support@digitsec.com). 
They set this setting to have the SAML SSO connection set properly on both sides. + +### Create S4 - Digitsec test user + +In this section, a user called B.Simon is created in S4 - Digitsec. S4 - Digitsec supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in S4 - Digitsec, a new one is created after authentication. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to S4 - Digitsec Sign on URL where you can initiate the login flow. + +* Go to S4 - Digitsec Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the S4 - Digitsec for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode. When you click the S4 - Digitsec tile in the My Apps, if configured in SP mode you would be redirected to the application sign-on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the S4 - Digitsec for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). + +## Next steps + +Once you configure S4 - Digitsec you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-aad). 
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/standard-for-success-tutorial.md b/articles/active-directory/saas-apps/standard-for-success-tutorial.md new file mode 100644 index 000000000000..680bd4d19d3a --- /dev/null +++ b/articles/active-directory/saas-apps/standard-for-success-tutorial.md @@ -0,0 +1,185 @@ +--- +title: 'Tutorial: Azure AD SSO integration with Standard for Success K-12' +description: Learn how to configure single sign-on between Azure Active Directory and Standard for Success K-12. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/27/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with Standard for Success K-12 + +In this tutorial, you'll learn how to integrate Standard for Success K-12 with Azure Active Directory (Azure AD). When you integrate Standard for Success K-12 with Azure AD, you can: + +* Control in Azure AD who has access to Standard for Success K-12. +* Enable your users to be automatically signed-in to Standard for Success K-12 with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Standard for Success K-12 single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* Standard for Success K-12 supports **SP** and **IDP** initiated SSO. 
+ +## Add Standard for Success K-12 from the gallery + +To configure the integration of Standard for Success K-12 into Azure AD, you need to add Standard for Success K-12 from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Standard for Success K-12** in the search box. +1. Select **Standard for Success K-12** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for Standard for Success K-12 + +Configure and test Azure AD SSO with Standard for Success K-12 using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Standard for Success K-12. + +To configure and test Azure AD SSO with Standard for Success K-12, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Standard for Success K-12 SSO](#configure-standard-for-success-k-12-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Standard for Success K-12 test user](#create-standard-for-success-k-12-test-user)** - to have a counterpart of B.Simon in Standard for Success K-12 that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. 
+ +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **Standard for Success K-12** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic SAML Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, perform the following steps: + + a. In the **Identifier** text box, type a value using the following pattern: + `api://` + + b. In the **Reply URL** text box, type a URL using the following pattern: + `https://edu.standardforsuccess.com/access/mssaml_consume?did=` + +1. Click **Set additional URLs** and perform the following steps if you wish to configure the application in SP initiated mode: + + a. In the **Sign-on URL** text box, type a URL using the following pattern: + `https://edu.standardforsuccess.com/access/mssaml_int?did=` + + b. In the **Relay State** text box, type a URL using the following pattern: + `https://edu.standardforsuccess.com/access/mssaml_consume?did=` + + > [!Note] + > These values are not real. Update these values with the actual Identifier, Reply URL, Sign-on URL and Relay State. Contact [Standard for Success K-12 Client support team](mailto:help@standardforsuccess.com) to get the INSTITUTION-ID value. You can also refer to the patterns shown in the Basic SAML Configuration section in the Azure portal. + +1. In the **SAML Signing Certificate** section, click **Edit** button to open **SAML Signing Certificate** dialog. + + ![Screenshot shows to edit SAML Signing Certificate.](common/edit-certificate.png "Signing Certificate") + +1. In the **SAML Signing Certificate** section, copy the **Thumbprint Value** and save it on your computer. 
+ + ![Screenshot shows to copy thumbprint value.](common/copy-thumbprint.png "Thumbprint") + +1. On the **Set up Standard for Success K-12** section, copy the appropriate URL(s) based on your requirement. + + ![Screenshot shows to copy configuration appropriate U R L.](common/copy-configuration-urls.png "Attributes") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Standard for Success K-12. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Standard for Success K-12**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure Standard for Success K-12 SSO + +1. 
Log in to your Standard for Success K-12 company site as an administrator with superuser access. + +1. From the menu, navigate to **Utilities** -> **Tools & Features**. + +1. Scroll down to **Single Sign On Settings** and click the **Microsoft Azure Single Sign On** link and perform the following steps: + + ![Screenshot that shows the Configuration Settings.](./media/standard-for-success-tutorial/settings.png "Configuration") + + a. Select **Enable Azure Single Sign On** checkbox. + + b. In the **Login URL** textbox, paste the **Login URL** value which you have copied from the Azure portal. + + c. In the **Azure AD Identifier** textbox, paste the **Azure AD Identifier** value which you have copied from the Azure portal. + + d. Fill the **Application ID** in the **Application ID** text box. + + e. In the **Certificate Thumbprint** text box, paste the **Thumbprint Value** that you copied from the Azure portal. + + f. Click **Save**. + +### Create Standard for Success K-12 test user + +1. In a different web browser window, log into your Standard for Success K-12 website as an administrator with superuser privileges. + +1. From the menu, navigate to **Utilities** -> **Accounts Manager**, then click **Create New User** and perform the following steps: + + ![Screenshot that shows the User Information fields.](./media/standard-for-success-tutorial/name.png "User Information") + + a. In **First Name** text box, enter the first name of the user. + + b. In **Last Name** text box, enter the last name of the user. + + c. In **Email** text box, enter the email address which you have added within Azure. + + d. Scroll to the bottom and click **Create User**. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to Standard for Success K-12 Sign on URL where you can initiate the login flow. 
+ +* Go to Standard for Success K-12 Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the Standard for Success K-12 for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode. When you click the Standard for Success K-12 tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the Standard for Success K-12 for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). + +## Next steps + +Once you configure Standard for Success K-12 you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). 
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/toc.yml b/articles/active-directory/saas-apps/toc.yml index e2b52db3eaf4..e6c745771137 100644 --- a/articles/active-directory/saas-apps/toc.yml +++ b/articles/active-directory/saas-apps/toc.yml @@ -247,6 +247,8 @@ href: baldwin-safety-&-compliance-tutorial.md - name: Balsamiq Wireframes href: balsamiq-wireframes-tutorial.md + - name: BMIS - Battery Management Information System + href: battery-management-information-system-tutorial.md - name: BC in the Cloud href: bcinthecloud-tutorial.md - name: Bealink @@ -739,6 +741,8 @@ href: envimmis-tutorial.md - name: Envoy href: envoy-tutorial.md + - name: E2open LSP + href: e2open-lsp-tutorial.md - name: EPHOTO DAM href: ephoto-dam-tutorial.md - name: ePlatform @@ -926,7 +930,7 @@ href: gigya-tutorial.md - name: GitHub href: github-tutorial.md - - name: GitHub AE + - name: GitHub Enterprise Server href: github-ae-tutorial.md - name: GitHub Enterprise Cloud - Enterprise Account href: github-enterprise-cloud-enterprise-account-tutorial.md @@ -1942,6 +1946,8 @@ href: screensteps-tutorial.md - name: Scuba Analytics href: scuba-analytics-tutorial.md + - name: S4 - Digitsec + href: s4-digitsec-tutorial.md - name: SD Elements href: sd-elements-tutorial.md - name: SDS & Chemical Information Management @@ -2114,6 +2120,8 @@ href: ssogen-tutorial.md - name: Standard for Success Accreditation href: standard-for-success-accreditation-tutorial.md + - name: Standard for Success K-12 + href: standard-for-success-tutorial.md - name: Starmind href: starmind-tutorial.md - name: StatusPage @@ -2294,6 +2302,8 @@ href: torii-tutorial.md - name: TruNarrative href: trunarrative-tutorial.md + - name: TVU Service + href: tvu-service-tutorial.md - name: Tulip href: tulip-tutorial.md - name: TurboRater diff --git a/articles/active-directory/saas-apps/tvu-service-tutorial.md b/articles/active-directory/saas-apps/tvu-service-tutorial.md new file mode 100644 index 
000000000000..f10705cd4835 --- /dev/null +++ b/articles/active-directory/saas-apps/tvu-service-tutorial.md @@ -0,0 +1,135 @@ +--- +title: 'Tutorial: Azure AD SSO integration with TVU Service' +description: Learn how to configure single sign-on between Azure Active Directory and TVU Service. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/21/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with TVU Service + +In this tutorial, you'll learn how to integrate TVU Service with Azure Active Directory (Azure AD). When you integrate TVU Service with Azure AD, you can: + +* Control in Azure AD who has access to TVU Service. +* Enable your users to be automatically signed-in to TVU Service with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* TVU Service single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* TVU Service supports **IDP** initiated SSO. + +## Add TVU Service from the gallery + +To configure the integration of TVU Service into Azure AD, you need to add TVU Service from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. 
Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **TVU Service** in the search box. +1. Select **TVU Service** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for TVU Service + +Configure and test Azure AD SSO with TVU Service using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in TVU Service. + +To configure and test Azure AD SSO with TVU Service, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure TVU Service SSO](#configure-tvu-service-sso)** - to configure the single sign-on settings on application side. + 1. **[Create TVU Service test user](#create-tvu-service-test-user)** - to have a counterpart of B.Simon in TVU Service that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **TVU Service** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") + +1. 
On the **Basic SAML Configuration** section, the application is pre-configured and the necessary URLs are already pre-populated with Azure. The user needs to save the configuration by clicking the **Save** button. + +1. TVU Service application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. + + ![Screenshot shows the image of TVU Service application.](common/default-attributes.png "Attributes") + +1. In addition to above, TVU Service application expects few more attributes to be passed back in SAML response, which are shown below. These attributes are also pre populated but you can review them as per your requirements. + + | Name | Source Attribute| + | ------------ | --------- | + | surname | user.surname | + | firstName | user.givenname | + | lastName | user.surname | + | email | user.mail | + +1. On the **Set up single sign-on with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. + + ![Screenshot shows the Certificate download link.](common/copy-metadataurl.png "Certificate") + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. 
+ +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to TVU Service. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **TVU Service**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure TVU Service SSO + +To configure single sign-on on **TVU Service** side, you need to send the **App Federation Metadata Url** to [TVU Service support team](mailto:support@tvunetworks.com). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create TVU Service test user + +In this section, you create a user called Britta Simon in TVU Service. Work with [TVU Service support team](mailto:support@tvunetworks.com) to add the users in the TVU Service platform. Users must be created and activated before you use single sign-on. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +* Click on Test this application in Azure portal and you should be automatically signed in to the TVU Service for which you set up the SSO. + +* You can use Microsoft My Apps. When you click the TVU Service tile in the My Apps, you should be automatically signed in to the TVU Service for which you set up the SSO. 
For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). + +## Next steps + +Once you configure TVU Service you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-any-app). \ No newline at end of file diff --git a/articles/active-directory/verifiable-credentials/credential-design.md b/articles/active-directory/verifiable-credentials/credential-design.md index 8b7a6b91d529..136e6bc56e1d 100644 --- a/articles/active-directory/verifiable-credentials/credential-design.md +++ b/articles/active-directory/verifiable-credentials/credential-design.md @@ -14,6 +14,8 @@ ms.author: barclayn # How to customize your verifiable credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Verifiable credentials are made up of two components, the rules and display files. The rules file determines what the user needs to provide before they receive a verifiable credential. The display file controls the branding of the credential and styling of the claims. In this guide, we will explain how to modify both files to meet the requirements of your organization. 
> [!IMPORTANT] diff --git a/articles/active-directory/verifiable-credentials/decentralized-identifier-overview.md b/articles/active-directory/verifiable-credentials/decentralized-identifier-overview.md index 6890ecc20347..f9b0cddbbf7e 100644 --- a/articles/active-directory/verifiable-credentials/decentralized-identifier-overview.md +++ b/articles/active-directory/verifiable-credentials/decentralized-identifier-overview.md @@ -15,6 +15,8 @@ ms.reviewer: # Introduction to Azure Active Directory Verifiable Credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + > [!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. > This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. @@ -96,7 +98,7 @@ The scenario we use to explain how VCs work involves: Today, Alice provides a username and password to log onto Woodgrove’s networked environment. Woodgrove is deploying a verifiable credential solution to provide a more manageable way for Alice to prove that she is an employee of Woodgrove. Proseware accepts verifiable credentials issued by Woodgrove as proof of employment to offer corporate discounts as part of their corporate discount program. -Alice requests Woodgrove Inc for a proof of employment verifiable credential. Woodgrove Inc attests Alice's identiy and issues a signed verfiable credential that Alice can accept and store in her digital wallet application. Alice can now present this verifiable credential as a proof of employement on the Proseware site. After a succesfull presentation of the credential, Prosware offers discount to Alice and the transaction is logged in Alice's wallet application so that she can track where and to whom she has presented her proof of employment verifiable credential. 
+Alice requests Woodgrove Inc for a proof of employment verifiable credential. Woodgrove Inc attests Alice's identity and issues a signed verifiable credential that Alice can accept and store in her digital wallet application. Alice can now present this verifiable credential as a proof of employment on the Proseware site. After a successful presentation of the credential, Proseware offers a discount to Alice and the transaction is logged in Alice's wallet application so that she can track where and to whom she has presented her proof of employment verifiable credential. ![microsoft-did-overview](media/decentralized-identifier-overview/did-overview.png) diff --git a/articles/active-directory/verifiable-credentials/get-started-request-api.md b/articles/active-directory/verifiable-credentials/get-started-request-api.md index 69735ef9b641..be6bde743798 100644 --- a/articles/active-directory/verifiable-credentials/get-started-request-api.md +++ b/articles/active-directory/verifiable-credentials/get-started-request-api.md @@ -16,6 +16,8 @@ ms.author: barclayn # Request Service REST API (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Azure Active Directory (Azure AD) Verifiable Credentials includes the Request Service REST API. This API allows you to issue and verify credentials. This article shows you how to start using the Request Service REST API. 
> [!IMPORTANT] diff --git a/articles/active-directory/verifiable-credentials/how-to-create-a-free-developer-account.md b/articles/active-directory/verifiable-credentials/how-to-create-a-free-developer-account.md index d536c8818e30..bf652ab7b48c 100644 --- a/articles/active-directory/verifiable-credentials/how-to-create-a-free-developer-account.md +++ b/articles/active-directory/verifiable-credentials/how-to-create-a-free-developer-account.md @@ -14,6 +14,8 @@ ms.author: barclayn # How to create a free Azure Active Directory developer tenant +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + > [!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. > This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. diff --git a/articles/active-directory/verifiable-credentials/how-to-dnsbind.md b/articles/active-directory/verifiable-credentials/how-to-dnsbind.md index 4ff5751dffb6..130809e7d03b 100644 --- a/articles/active-directory/verifiable-credentials/how-to-dnsbind.md +++ b/articles/active-directory/verifiable-credentials/how-to-dnsbind.md @@ -15,6 +15,8 @@ ms.author: barclayn # Link your domain to your Decentralized Identifier (DID) (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + > [!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. > This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. 
diff --git a/articles/active-directory/verifiable-credentials/how-to-issuer-revoke.md b/articles/active-directory/verifiable-credentials/how-to-issuer-revoke.md index 804bf9560bef..a2b9887454cb 100644 --- a/articles/active-directory/verifiable-credentials/how-to-issuer-revoke.md +++ b/articles/active-directory/verifiable-credentials/how-to-issuer-revoke.md @@ -15,6 +15,8 @@ ms.author: barclayn # Revoke a previously issued verifiable credential (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + As part of the process of working with verifiable credentials (VCs), you not only have to issue credentials, but sometimes you also have to revoke them. In this article we go over the **Status** property part of the VC specification and take a closer look at the revocation process, why we may want to revoke credentials and some data and privacy implications. > [!IMPORTANT] diff --git a/articles/active-directory/verifiable-credentials/how-to-opt-out.md b/articles/active-directory/verifiable-credentials/how-to-opt-out.md index 5f099618df8f..d1cc625b7f6f 100644 --- a/articles/active-directory/verifiable-credentials/how-to-opt-out.md +++ b/articles/active-directory/verifiable-credentials/how-to-opt-out.md @@ -15,6 +15,8 @@ ms.author: barclayn # Opt out of the verifiable credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + In this article: - The reason why you may need to opt out. 
diff --git a/articles/active-directory/verifiable-credentials/introduction-to-verifiable-credentials-architecture.md b/articles/active-directory/verifiable-credentials/introduction-to-verifiable-credentials-architecture.md index 46cac82dc69a..f564a0a8a59c 100644 --- a/articles/active-directory/verifiable-credentials/introduction-to-verifiable-credentials-architecture.md +++ b/articles/active-directory/verifiable-credentials/introduction-to-verifiable-credentials-architecture.md @@ -13,6 +13,8 @@ ms.author: baselden # Azure AD Verifiable Credentials architecture overview (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + > [!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. For more information, see [**Supplemental Terms of Use for Microsoft Azure Previews**](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). diff --git a/articles/active-directory/verifiable-credentials/issuance-request-api.md b/articles/active-directory/verifiable-credentials/issuance-request-api.md index 180225004608..e33f5be16f69 100644 --- a/articles/active-directory/verifiable-credentials/issuance-request-api.md +++ b/articles/active-directory/verifiable-credentials/issuance-request-api.md @@ -16,6 +16,8 @@ ms.author: barclayn # Request Service REST API issuance specification (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Azure Active Directory (Azure AD) Verifiable Credentials includes the Request Service REST API. This API allows you to issue and verify a credential. This article specifies the Request Service REST API for an issuance request. 
## HTTP request diff --git a/articles/active-directory/verifiable-credentials/issuer-openid.md b/articles/active-directory/verifiable-credentials/issuer-openid.md index 59374980c2a0..c4f7384130cf 100644 --- a/articles/active-directory/verifiable-credentials/issuer-openid.md +++ b/articles/active-directory/verifiable-credentials/issuer-openid.md @@ -15,6 +15,8 @@ ms.author: barclayn # Issuer service communication examples (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + The Azure AD Verifiable Credential service can issue verifiable credentials by retrieving claims from an ID token generated by your organization's OpenID compliant identity provider. This article instructs you on how to set up your identity provider so Authenticator can communicate with it and retrieve the correct ID Token to pass to the issuing service. > [!IMPORTANT] diff --git a/articles/active-directory/verifiable-credentials/plan-issuance-solution.md b/articles/active-directory/verifiable-credentials/plan-issuance-solution.md index 0195706a75dc..77753973d3dd 100644 --- a/articles/active-directory/verifiable-credentials/plan-issuance-solution.md +++ b/articles/active-directory/verifiable-credentials/plan-issuance-solution.md @@ -14,6 +14,8 @@ ms.custom: references_regions # Plan your Azure Active Directory Verifiable Credentials issuance solution (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + >[!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. For more information, see [**Supplemental Terms of Use for Microsoft Azure Previews**](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). 
diff --git a/articles/active-directory/verifiable-credentials/plan-verification-solution.md b/articles/active-directory/verifiable-credentials/plan-verification-solution.md index d50bdb986d8a..1a772da51745 100644 --- a/articles/active-directory/verifiable-credentials/plan-verification-solution.md +++ b/articles/active-directory/verifiable-credentials/plan-verification-solution.md @@ -14,6 +14,8 @@ ms.custom: references_regions # Plan your Azure Active Directory Verifiable Credentials verification solution (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + >[!IMPORTANT] > Azure Active Directory Verifiable Credentials is currently in public preview. This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). diff --git a/articles/active-directory/verifiable-credentials/presentation-request-api.md b/articles/active-directory/verifiable-credentials/presentation-request-api.md index 78a21415cc5d..5923723b69bd 100644 --- a/articles/active-directory/verifiable-credentials/presentation-request-api.md +++ b/articles/active-directory/verifiable-credentials/presentation-request-api.md @@ -16,6 +16,8 @@ ms.author: barclayn # Request Service REST API presentation specification (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Azure Active Directory (Azure AD) Verifiable Credentials includes the Request Service REST API. This API allows you to issue and verify a credential. This article specifies the Request Service REST API for a presentation request. The presentation request asks the user to present a verifiable credential, and then verify the credential. 
## HTTP request @@ -46,18 +48,18 @@ POST https://beta.did.msidentity.com/v1.0/contoso.onmicrosoft.com/verifiablecred Content-Type: application/json Authorization: Bearer -{ -    "includeQRCode": true, - "callback": { -    "url": "https://www.contoso.com/api/verifier/presentationCallbac", -    "state": "11111111-2222-2222-2222-333333333333", -      "headers": { -        "api-key": "an-api-key-can-go-here" -      } -    }, +{ +    "includeQRCode": true, + "callback": { +    "url": "https://www.contoso.com/api/verifier/presentationCallbac", +    "state": "11111111-2222-2222-2222-333333333333", +      "headers": { +        "api-key": "an-api-key-can-go-here" +      } +    },     ... -} -``` +} +``` The following permission is required to call the Request Service REST API. For more information, see [Grant permissions to get access tokens](verifiable-credentials-configure-tenant.md#grant-permissions-to-get-access-tokens). @@ -98,7 +100,7 @@ The presentation request payload contains information about your verifiable cred } ``` -The payload contains the following properties. +The payload contains the following properties. |Parameter |Type | Description | |---------|---------|---------| @@ -154,12 +156,12 @@ The Request Service REST API generates several events to the callback endpoint. If successful, this method returns a response code (*HTTP 201 Created*), and a collection of event objects in the response body. 
The following JSON demonstrates a successful response: ```json -{ +{ "requestId": "e4ef27ca-eb8c-4b63-823b-3b95140eac11", "url": "openid://vc/?request_uri=https://beta.did.msidentity.com/v1.0/87654321-0000-0000-0000-000000000000/verifiablecredentials/request/e4ef27ca-eb8c-4b63-823b-3b95140eac11", "expiry": 1633017751, "qrCode": "data:image/png;base64,iVBORw0KGgoA" -} +} ``` The response contains the following properties: @@ -200,7 +202,7 @@ The response contains the following properties: ## Callback events -The callback endpoint is called when a user scans the QR code, uses the deep link the authenticator app, or finishes the presentation process. +The callback endpoint is called when a user scans the QR code, uses the deep link the authenticator app, or finishes the presentation process. |Property |Type |Description | |---------|---------|---------| @@ -208,17 +210,17 @@ The callback endpoint is called when a user scans the QR code, uses the deep lin | `code` |string |The code returned when the request was retrieved by the authenticator app. Possible values:
  • `request_retrieved`: The user scanned the QR code or selected the link that starts the presentation flow.
  • `presentation_verified`: The verifiable credential validation completed successfully.
| | `state` |string| Returns the state value that you passed in the original payload. | | `subject`|string | The verifiable credential user DID.| -| `issuers`| array |Returns an array of verifiable credentials requested. For each verifiable credential, it provides:
  • The verifiable credential type(s).
  • The issuer's DID
  • The claims retrieved.
  • The verifiable credential issuer’s domain.
  • The verifiable credential issuer’s domain validation status.
  • | +| `issuers`| array |Returns an array of verifiable credentials requested. For each verifiable credential, it provides:
  • The verifiable credential type(s).
  • The issuer's DID
  • The claims retrieved.
  • The verifiable credential issuer's domain.
  • The verifiable credential issuer's domain validation status.
  • | | `receipt`| string | Optional. The receipt contains the original payload sent from the wallet to the Verifiable Credentials service. The receipt should be used for troubleshooting/debugging only. The format in the receipt is not fix and can change based on the wallet and version used.| The following example demonstrates a callback payload when the authenticator app starts the presentation request: ```json -{ -    "requestId":"aef2133ba45886ce2c38974339ba1057", -    "code":"request_retrieved", +{ +    "requestId":"aef2133ba45886ce2c38974339ba1057", +    "code":"request_retrieved",     "state":"Wy0ThUz1gSasAjS1" -} +} ``` The following example demonstrates a callback payload after the verifiable credential presentation has successfully completed: diff --git a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-issuer.md b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-issuer.md index 5b6fa0221229..b7b326011ced 100644 --- a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-issuer.md +++ b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-issuer.md @@ -12,8 +12,11 @@ ms.date: 05/03/2022 --- + # Issue Azure AD Verifiable Credentials from an application (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + In this tutorial, you run a sample application from your local computer that connects to your Azure Active Directory (Azure AD) tenant. Using the application, you're going to issue and verify a verified credential expert card. 
In this article, you learn how to: diff --git a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-tenant.md b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-tenant.md index da56a83c2283..4650bf09e5fe 100644 --- a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-tenant.md +++ b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-tenant.md @@ -14,6 +14,8 @@ ms.date: 05/06/2022 # Configure your tenant for Azure AD Verifiable Credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + Azure Active Directory (Azure AD) Verifiable Credentials safeguards your organization with an identity solution that's seamless and decentralized. The service allows you to issue and verify credentials. For issuers, Azure AD provides a service that they can customize and use to issue their own verifiable credentials. For verifiers, the service provides a free REST API that makes it easy to request and accept verifiable credentials in your apps and services. In this tutorial, you learn how to configure your Azure AD tenant so it can use the verifiable credentials service. diff --git a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-verifier.md b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-verifier.md index adee0076d469..0bdf16045116 100644 --- a/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-verifier.md +++ b/articles/active-directory/verifiable-credentials/verifiable-credentials-configure-verifier.md @@ -7,13 +7,15 @@ author: barclayn manager: rkarlin ms.author: barclayn ms.topic: tutorial -ms.date: 10/08/2021 +ms.date: 05/18/2022 # Customer intent: As an enterprise, we want to enable customers to manage information about themselves by using verifiable credentials. 
--- # Configure Azure AD Verifiable Credentials verifier (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + In [Issue Azure AD Verifiable Credentials from an application (preview)](verifiable-credentials-configure-issuer.md), you learn how to issue and verify credentials by using the same Azure Active Directory (Azure AD) tenant. In this tutorial, you go over the steps needed to present and verify your first verifiable credential: a verified credential expert card. As a verifier, you unlock privileges to subjects that possess verified credential expert cards. In this tutorial, you run a sample application from your local computer that asks you to present a verified credential expert card, and then verifies it. diff --git a/articles/active-directory/verifiable-credentials/verifiable-credentials-faq.md b/articles/active-directory/verifiable-credentials/verifiable-credentials-faq.md index 83b69dd8f266..d8bfe7ea11be 100644 --- a/articles/active-directory/verifiable-credentials/verifiable-credentials-faq.md +++ b/articles/active-directory/verifiable-credentials/verifiable-credentials-faq.md @@ -13,6 +13,8 @@ ms.author: barclayn # Frequently Asked Questions (FAQ) (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + This page contains commonly asked questions about Verifiable Credentials and Decentralized Identity. Questions are organized into the following sections. 
- [Vocabulary and basics](#the-basics) diff --git a/articles/active-directory/verifiable-credentials/whats-new.md b/articles/active-directory/verifiable-credentials/whats-new.md index 24c8d975af2b..0ffb3b9f36b8 100644 --- a/articles/active-directory/verifiable-credentials/whats-new.md +++ b/articles/active-directory/verifiable-credentials/whats-new.md @@ -16,6 +16,8 @@ ms.author: barclayn # What's new in Azure Active Directory Verifiable Credentials (preview) +[!INCLUDE [Verifiable Credentials announcement](../../../includes/verifiable-credentials-brand.md)] + This article lists the latest features, improvements, and changes in the Azure Active Directory (Azure AD) Verifiable Credentials service. ## May 2022 diff --git a/articles/aks/TOC.yml b/articles/aks/TOC.yml index 8eb6e1ea44f8..ebed23115d6b 100644 --- a/articles/aks/TOC.yml +++ b/articles/aks/TOC.yml @@ -467,10 +467,16 @@ href: open-service-mesh-uninstall-add-on.md - name: Track releases and region availability href: release-tracker.md + - name: Deploy the Kubernetes Event-driven Autoscaler (KEDA) add-on (preview) + items: + - name: About Kubernetes Event-driven Autoscaler (KEDA) + href: keda-about.md + - name: Use ARM template + href: keda-deploy-add-on-arm.md + - name: Kubernetes Event-driven Autoscaler (KEDA) integrations + href: keda-integrations.md - name: Use Web Application Routing (preview) href: web-app-routing.md - - name: Kubernetes Event-driven Autoscaler add-on (preview) - href: keda.md - name: Use cluster extensions href: cluster-extensions.md - name: DevOps diff --git a/articles/aks/howto-deploy-java-liberty-app-with-postgresql.md b/articles/aks/howto-deploy-java-liberty-app-with-postgresql.md index fec268159616..ef5c02bc74a9 100644 --- a/articles/aks/howto-deploy-java-liberty-app-with-postgresql.md +++ b/articles/aks/howto-deploy-java-liberty-app-with-postgresql.md @@ -81,7 +81,7 @@ The steps in this section guide you through creating an Azure Database for Postg Use the [az postgres server 
create](/cli/azure/postgres/server#az-postgres-server-create) command to create the DB server. The following example creates a DB server named *youruniquedbname*. Make sure *youruniqueacrname* is unique within Azure. > [!TIP] - > To help ensure a globally unique name, prepend a disambiguation string such as your intitials and the MMDD of today's date. + > To help ensure a globally unique name, prepend a disambiguation string such as your initials and the MMDD of today's date. ```bash @@ -153,7 +153,7 @@ In directory *liberty/config*, the *server.xml* is used to configure the DB conn After the offer is successfully deployed, an AKS cluster will be generated automatically. The AKS cluster is configured to connect to the ACR. Before we get started with the application, we need to extract the namespace configured for the AKS. -1. Run following command to print the current deployment file, using the `appDeploymentTemplateYamlEncoded` you saved above. The output contains all the variables we need. +1. Run the following command to print the current deployment file, using the `appDeploymentTemplateYamlEncoded` you saved above. The output contains all the variables we need. ```bash echo | base64 -d diff --git a/articles/aks/includes/keda/current-version-callout.md b/articles/aks/includes/keda/current-version-callout.md new file mode 100644 index 000000000000..59fdc4969333 --- /dev/null +++ b/articles/aks/includes/keda/current-version-callout.md @@ -0,0 +1,11 @@ +--- +author: tomkerkhove + +ms.service: container-service +ms.topic: include +ms.date: 05/24/2022 +ms.author: tomkerkhove +--- + +> [!IMPORTANT] +> The KEDA add-on installs version *2.7.0* of KEDA on your cluster. 
\ No newline at end of file diff --git a/articles/aks/keda-about.md b/articles/aks/keda-about.md new file mode 100644 index 000000000000..68e36648197d --- /dev/null +++ b/articles/aks/keda-about.md @@ -0,0 +1,79 @@ +--- +title: Kubernetes Event-driven Autoscaling (KEDA) (Preview) +description: Simplified application autoscaling with Kubernetes Event-driven Autoscaling (KEDA) add-on. +services: container-service +author: tomkerkhove +ms.topic: article +ms.date: 05/24/2022 +ms.author: tomkerkhove +--- + +# Simplified application autoscaling with Kubernetes Event-driven Autoscaling (KEDA) add-on (Preview) + +Kubernetes Event-driven Autoscaling (KEDA) is a single-purpose and lightweight component that strives to make application autoscaling simple and is a CNCF Incubation project. + +It applies event-driven autoscaling to scale your application to meet demand in a sustainable and cost-efficient manner with scale-to-zero. + +The KEDA add-on makes it even easier by deploying a managed KEDA installation, providing you with [a rich catalog of 50+ KEDA scalers][keda-scalers] that you can scale your applications with on your Azure Kubernetes Services (AKS) cluster. + +[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] + +## Architecture + +[KEDA][keda] provides two main components: + +- **KEDA operator** allows end-users to scale workloads in/out from 0 to N instances with support for Kubernetes Deployments, Jobs, StatefulSets or any custom resource that defines `/scale` subresource. +- **Metrics server** exposes external metrics to Horizontal Pod Autoscaler (HPA) in Kubernetes for autoscaling purposes such as messages in a Kafka topic, or number of events in an Azure event hub. Due to upstream limitations, KEDA must be the only installed metric adapter. 
+ +![Diagram that shows the architecture of K E D A and how it extends Kubernetes instead of re-inventing the wheel.](./media/keda/architecture.png) + +Learn more about how KEDA works in the [official KEDA documentation][keda-architecture]. + +## Installation and version + +KEDA can be added to your Azure Kubernetes Service (AKS) cluster by enabling the KEDA add-on using an [ARM template][keda-arm]. + +The KEDA add-on provides a fully supported installation of KEDA that is integrated with AKS. + +[!INCLUDE [Current version callout](./includes/keda/current-version-callout.md)] + +## Capabilities and features + +KEDA provides the following capabilities and features: + +- Build sustainable and cost-efficient applications with scale-to-zero +- Scale application workloads to meet demand using [a rich catalog of 50+ KEDA scalers][keda-scalers] +- Autoscale applications with `ScaledObjects`, such as Deployments, StatefulSets or any custom resource that defines `/scale` subresource +- Autoscale job-like workloads with `ScaledJobs` +- Use production-grade security by decoupling autoscaling authentication from workloads +- Bring-your-own external scaler to use tailor-made autoscaling decisions + +## Add-on limitations + +The KEDA AKS add-on has the following limitations: + +* KEDA's [HTTP add-on (preview)][keda-http-add-on] to scale HTTP workloads isn't installed with the extension, but can be deployed separately. +* KEDA's [external scaler for Azure Cosmos DB][keda-cosmos-db-scaler] to scale based on Azure Cosmos DB change feed isn't installed with the extension, but can be deployed separately. +* Only one metric server is allowed in the Kubernetes cluster. Because of that the KEDA add-on should be the only metrics server inside the cluster. + * Multiple KEDA installations aren't supported +* Managed identity isn't supported. + +For general KEDA questions, we recommend [visiting the FAQ overview][keda-faq]. 
+ +## Next steps + +* [Enable the KEDA add-on with an ARM template][keda-arm] +* [Autoscale a .NET Core worker processing Azure Service Bus Queue messages][keda-sample] + + +[keda-azure-cli]: keda-deploy-addon-az-cli.md +[keda-arm]: keda-deploy-add-on-arm.md + + +[keda]: https://keda.sh/ +[keda-architecture]: https://keda.sh/docs/latest/concepts/ +[keda-faq]: https://keda.sh/docs/latest/faq/ +[keda-sample]: https://github.com/kedacore/sample-dotnet-worker-servicebus-queue +[keda-scalers]: https://keda.sh/docs/scalers/ +[keda-http-add-on]: https://github.com/kedacore/http-add-on +[keda-cosmos-db-scaler]: https://github.com/kedacore/external-scaler-azure-cosmos-db diff --git a/articles/aks/keda.md b/articles/aks/keda-deploy-add-on-arm.md similarity index 75% rename from articles/aks/keda.md rename to articles/aks/keda-deploy-add-on-arm.md index 58d40ca2eb21..53421176639e 100644 --- a/articles/aks/keda.md +++ b/articles/aks/keda-deploy-add-on-arm.md @@ -1,29 +1,21 @@ --- -title: KEDA add-on on Azure Kubernetes Service (AKS) (Preview) -description: Use the KEDA add-on to deploy a managed KEDA instance on Azure Kubernetes Service (AKS). +title: Deploy the Kubernetes Event-driven Autoscaling (KEDA) add-on by using an ARM template +description: Use an ARM template to deploy the Kubernetes Event-driven Autoscaling (KEDA) add-on to Azure Kubernetes Service (AKS). services: container-service author: jahabibi ms.topic: article -ms.custom: event-tier1-build-2022 -ms.date: 05/24/2021 +ms.date: 05/24/2022 ms.author: jahabibi --- -# Simplified application autoscaling with Kubernetes Event-driven Autoscaling (KEDA) add-on (Preview) +# Deploy the Kubernetes Event-driven Autoscaling (KEDA) add-on by using ARM template -Kubernetes Event-driven Autoscaling (KEDA) is a single-purpose and lightweight component that strives to make application autoscaling simple and is a CNCF Incubation project. 
+This article shows you how to deploy the Kubernetes Event-driven Autoscaling (KEDA) add-on to Azure Kubernetes Service (AKS) by using an [ARM](../azure-resource-manager/templates/index.yml) template. -The KEDA add-on makes it even easier by deploying a managed KEDA installation, providing you with [a rich catalog of 40+ KEDA scalers](https://keda.sh/docs/latest/scalers/) that you can scale your applications with on your Azure Kubernetes Services (AKS) cluster. +[!INCLUDE [Current version callout](./includes/keda/current-version-callout.md)] [!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] -## KEDA add-on overview - -[KEDA][keda] provides two main components: - -- **KEDA operator** allows end-users to scale workloads in/out from 0 to N instances with support for Kubernetes Deployments, Jobs, StatefulSets or any custom resource that defines `/scale` subresource. -- **Metrics server** exposes external metrics to HPA in Kubernetes for autoscaling purposes such as messages in a Kafka topic, or number of events in an Azure event hub. Due to upstream limitations, this must be the only installed metric adapter. - ## Prerequisites > [!NOTE] @@ -134,9 +126,11 @@ The following snippet is a sample deployment that creates a cluster with KEDA en } ``` -## Use KEDA +## Start scaling apps with KEDA -KEDA scaling will only work once a custom resource definition has been defined (CRD). To learn more about KEDA CRDs, follow the official [KEDA documentation][keda-scalers] to define your scaler. +Now that KEDA is installed, you can start autoscaling your apps with KEDA by using its custom resource definitions (CRDs). + +To learn more about KEDA CRDs, follow the official [KEDA documentation][keda-scalers] to define your scaler. 
## Clean Up @@ -145,6 +139,9 @@ To remove the resource group, and all related resources, use the [az group delet ```azurecli az group delete --name MyResourceGroup ``` +## Next steps + +This article showed you how to install the KEDA add-on on an AKS cluster, and then verify that it's installed and running. With the KEDA add-on installed on your cluster, you can [deploy a sample application][keda-sample] to start scaling apps [az-aks-create]: /cli/azure/aks#az-aks-create @@ -157,3 +154,4 @@ az group delete --name MyResourceGroup [kubectl]: https://kubernetes.io/docs/user-guide/kubectl [keda]: https://keda.sh/ [keda-scalers]: https://keda.sh/docs/scalers/ +[keda-sample]: https://github.com/kedacore/sample-dotnet-worker-servicebus-queue diff --git a/articles/aks/keda-integrations.md b/articles/aks/keda-integrations.md new file mode 100644 index 000000000000..e0876ac58728 --- /dev/null +++ b/articles/aks/keda-integrations.md @@ -0,0 +1,63 @@ +--- +title: Integrations with Kubernetes Event-driven Autoscaling (KEDA) on Azure Kubernetes Service (AKS) (Preview) +description: Integrations with Kubernetes Event-driven Autoscaling (KEDA) on Azure Kubernetes Service (AKS) (Preview). +services: container-service +author: tomkerkhove +ms.topic: article +ms.date: 05/24/2022 +ms.author: tomkerkhove +--- + +# Integrations with Kubernetes Event-driven Autoscaling (KEDA) on Azure Kubernetes Service (AKS) (Preview) + +The Kubernetes Event-driven Autoscaling (KEDA) add-on integrates with features provided by Azure and open source projects. + +[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] + +> [!IMPORTANT] +> Integrations with open source projects are not covered by the [AKS support policy][aks-support-policy]. + +## Observe your autoscaling with Kubernetes events + +KEDA automatically emits Kubernetes events allowing customers to operate their application autoscaling. 
+ +To learn about the available events, we recommend reading the [KEDA documentation][keda-event-docs]. + +## Scalers for Azure services + +KEDA can integrate with various tools and services through [a rich catalog of 50+ KEDA scalers][keda-scalers]. It supports leading cloud platforms (such as Azure) and open-source technologies such as Redis and Kafka. + +It leverages the following scalers for Azure services: + +- [Azure Application Insights](https://keda.sh/docs/latest/scalers/azure-app-insights/) +- [Azure Blob Storage](https://keda.sh/docs/latest/scalers/azure-storage-blob/) +- [Azure Data Explorer](https://keda.sh/docs/latest/scalers/azure-data-explorer/) +- [Azure Event Hubs](https://keda.sh/docs/latest/scalers/azure-event-hub/) +- [Azure Log Analytics](https://keda.sh/docs/latest/scalers/azure-log-analytics/) +- [Azure Monitor](https://keda.sh/docs/latest/scalers/azure-monitor/) +- [Azure Pipelines](https://keda.sh/docs/latest/scalers/azure-pipelines/) +- [Azure Service Bus](https://keda.sh/docs/latest/scalers/azure-service-bus/) +- [Azure Storage Queue](https://keda.sh/docs/latest/scalers/azure-storage-queue/) + +Next to the built-in scalers, you can install external scalers yourself to autoscale on other Azure services: + +- [Azure Cosmos DB (Change feed)](https://github.com/kedacore/external-scaler-azure-cosmos-db) + +However, these external scalers aren't supported as part of the add-on and rely on community support. 
+ +## Next steps + +* [Enable the KEDA add-on with an ARM template][keda-arm] +* [Autoscale a .NET Core worker processing Azure Service Bus Queue message][keda-sample] + + +[aks-support-policy]: support-policies.md +[azure-monitor]: ../azure-monitor/overview.md +[azure-monitor-container-insights]: ../azure-monitor/containers/container-insights-onboard.md +[keda-arm]: keda-deploy-add-on-arm.md + + +[keda-scalers]: https://keda.sh/docs/scalers/ +[keda-metrics]: https://keda.sh/docs/latest/operate/prometheus/ +[keda-event-docs]: https://keda.sh/docs/latest/operate/kubernetes-events/ +[keda-sample]: https://github.com/kedacore/sample-dotnet-worker-servicebus-queue diff --git a/articles/aks/learn/quick-windows-container-deploy-cli.md b/articles/aks/learn/quick-windows-container-deploy-cli.md index 6d9c53d57c98..ed4ee4f40d9e 100644 --- a/articles/aks/learn/quick-windows-container-deploy-cli.md +++ b/articles/aks/learn/quick-windows-container-deploy-cli.md @@ -302,9 +302,6 @@ spec: limits: cpu: 1 memory: 800M - requests: - cpu: .1 - memory: 300M ports: - containerPort: 80 selector: diff --git a/articles/aks/learn/quick-windows-container-deploy-powershell.md b/articles/aks/learn/quick-windows-container-deploy-powershell.md index e4fed7b2564f..e18be108d1a9 100644 --- a/articles/aks/learn/quick-windows-container-deploy-powershell.md +++ b/articles/aks/learn/quick-windows-container-deploy-powershell.md @@ -211,9 +211,6 @@ spec: limits: cpu: 1 memory: 800M - requests: - cpu: .1 - memory: 300M ports: - containerPort: 80 selector: diff --git a/articles/aks/media/keda/architecture.png b/articles/aks/media/keda/architecture.png new file mode 100644 index 000000000000..b5751183644d Binary files /dev/null and b/articles/aks/media/keda/architecture.png differ diff --git a/articles/aks/private-clusters.md b/articles/aks/private-clusters.md index 94723e3c13bb..8cca35c58e57 100644 --- a/articles/aks/private-clusters.md +++ b/articles/aks/private-clusters.md @@ -3,7 +3,7 @@ title: Create 
a private Azure Kubernetes Service cluster description: Learn how to create a private Azure Kubernetes Service (AKS) cluster services: container-service ms.topic: article -ms.date: 01/12/2022 +ms.date: 05/27/2022 --- @@ -150,6 +150,9 @@ As mentioned, virtual network peering is one way to access your private cluster. 3. In scenarios where the VNet containing your cluster has custom DNS settings (4), cluster deployment fails unless the private DNS zone is linked to the VNet that contains the custom DNS resolvers (5). This link can be created manually after the private zone is created during cluster provisioning or via automation upon detection of creation of the zone using event-based deployment mechanisms (for example, Azure Event Grid and Azure Functions). +> [!NOTE] +> Conditional Forwarding doesn't support subdomains. + > [!NOTE] > If you are using [Bring Your Own Route Table with kubenet](./configure-kubenet.md#bring-your-own-subnet-and-route-table-with-kubenet) and Bring Your Own DNS with Private Cluster, the cluster creation will fail. You will need to associate the [RouteTable](./configure-kubenet.md#bring-your-own-subnet-and-route-table-with-kubenet) in the node resource group to the subnet after the cluster creation failed, in order to make the creation successful. diff --git a/articles/aks/use-managed-identity.md b/articles/aks/use-managed-identity.md index c57179e93438..f1bb0e5d3cb1 100644 --- a/articles/aks/use-managed-identity.md +++ b/articles/aks/use-managed-identity.md @@ -322,22 +322,20 @@ A successful cluster creation using your own kubelet managed identity contains t }, ``` -### Update an existing cluster using kubelet identity (Preview) +### Update an existing cluster using kubelet identity Update kubelet identity on an existing cluster with your existing identities. -#### Install the `aks-preview` Azure CLI - -You also need the *aks-preview* Azure CLI extension version 0.5.64 or later. 
Install the *aks-preview* Azure CLI extension by using the [az extension add][az-extension-add] command. Or install any available updates by using the [az extension update][az-extension-update] command. +#### Make sure the CLI version is 2.37.0 or later ```azurecli-interactive -# Install the aks-preview extension -az extension add --name aks-preview +# Check the version of Azure CLI modules +az version -# Update the extension to make sure you have the latest version installed -az extension update --name aks-preview +# Upgrade the version to make sure it is 2.37.0 or later +az upgrade ``` -#### Updating your cluster with kubelet identity (Preview) +#### Updating your cluster with kubelet identity Now you can use the following command to update your cluster with your existing identities. Provide the control plane identity id via `assign-identity` and the kubelet managed identity via `assign-kubelet-identity`: diff --git a/articles/aks/web-app-routing.md b/articles/aks/web-app-routing.md index 20a6a1f63806..16058ec323d8 100644 --- a/articles/aks/web-app-routing.md +++ b/articles/aks/web-app-routing.md @@ -23,10 +23,9 @@ The Web Application Routing solution makes it easy to access applications that a The add-on deploys four components: an [nginx ingress controller][nginx], [Secrets Store CSI Driver][csi-driver], [Open Service Mesh (OSM)][osm], and [External-DNS][external-dns] controller. - **Nginx ingress Controller**: The ingress controller exposed to the internet. -- **External-dns**: Watches for Kubernetes Ingress resources and creates DNS A records in the cluster-specific DNS zone. +- **External-DNS controller**: Watches for Kubernetes Ingress resources and creates DNS A records in the cluster-specific DNS zone. - **CSI driver**: Connector used to communicate with keyvault to retrieve SSL certificates for ingress controller. 
- **OSM**: A lightweight, extensible, cloud native service mesh that allows users to uniformly manage, secure, and get out-of-the-box observability features for highly dynamic microservice environments. -- **External-DNS controller**: Watches for Kubernetes Ingress resources and creates DNS A records in the cluster-specific DNS zone. ## Prerequisites @@ -251,4 +250,4 @@ service "aks-helloworld" deleted [kubectl-delete]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#delete [kubectl-logs]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#logs [ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/ -[ingress-resource]: https://kubernetes.io/docs/concepts/services-networking/ingress/#the-ingress-resource \ No newline at end of file +[ingress-resource]: https://kubernetes.io/docs/concepts/services-networking/ingress/#the-ingress-resource diff --git a/articles/app-service/environment/migrate.md b/articles/app-service/environment/migrate.md index 8ca298d07268..6935332ed62e 100644 --- a/articles/app-service/environment/migrate.md +++ b/articles/app-service/environment/migrate.md @@ -149,7 +149,7 @@ There's no cost to migrate your App Service Environment. You'll stop being charg - **What happens if migration fails or there is an unexpected issue during the migration?** If there's an unexpected issue, support teams will be on hand. It's recommended to migrate dev environments before touching any production environments. - **What happens to my old App Service Environment?** - If you decide to migrate an App Service Environment, the old environment gets shut down and deleted and all of your apps are migrated to a new environment. Your old environment will no longer be accessible. + If you decide to migrate an App Service Environment, the old environment gets shut down and deleted and all of your apps are migrated to a new environment. Your old environment will no longer be accessible. 
A rollback to the old environment will not be possible. - **What will happen to my App Service Environment v1/v2 resources after 31 August 2024?** After 31 August 2024, if you haven't migrated to App Service Environment v3, your App Service Environment v1/v2s and the apps deployed in them will no longer be available. App Service Environment v1/v2 is hosted on App Service scale units running on [Cloud Services (classic)](../../cloud-services/cloud-services-choose-me.md) architecture that will be [retired on 31 August 2024](https://azure.microsoft.com/updates/cloud-services-retirement-announcement/). Because of this, [App Service Environment v1/v2 will no longer be available after that date](https://azure.microsoft.com/updates/app-service-environment-v1-and-v2-retirement-announcement/). Migrate to App Service Environment v3 to keep your apps running or save or back up any resources or data that you need to maintain. @@ -165,4 +165,4 @@ There's no cost to migrate your App Service Environment. You'll stop being charg > [App Service Environment v3 Networking](networking.md) > [!div class="nextstepaction"] -> [Using an App Service Environment v3](using.md) \ No newline at end of file +> [Using an App Service Environment v3](using.md) diff --git a/articles/applied-ai-services/form-recognizer/compose-custom-models-preview.md b/articles/applied-ai-services/form-recognizer/compose-custom-models-preview.md index d10922ecc3df..87537f3c1183 100644 --- a/articles/applied-ai-services/form-recognizer/compose-custom-models-preview.md +++ b/articles/applied-ai-services/form-recognizer/compose-custom-models-preview.md @@ -74,7 +74,7 @@ If you want to use manually labeled data, you'll also have to upload the *.label When you [train your model](https://formrecognizer.appliedai.azure.com/studio/custommodel/projects) with labeled data, the model uses supervised learning to extract values of interest, using the labeled forms you provide. 
Labeled data results in better-performing models and can produce models that work with complex forms or forms containing values without keys. -Form Recognizer uses the [prebuilt-layout model](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) API to learn the expected sizes and positions of printed and handwritten text elements and extract tables. Then it uses user-specified labels to learn the key/value associations and tables in the documents. We recommend that you use five manually labeled forms of the same type (same structure) to get started with training a new model. Then, add more labeled data, as needed, to improve the model accuracy. Form Recognizer enables training a model to extract key-value pairs and tables using supervised learning capabilities. +Form Recognizer uses the [prebuilt-layout model](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v3-0-preview-2/operations/AnalyzeDocument) API to learn the expected sizes and positions of typeface and handwritten text elements and extract tables. Then it uses user-specified labels to learn the key/value associations and tables in the documents. We recommend that you use five manually labeled forms of the same type (same structure) to get started with training a new model. Then, add more labeled data, as needed, to improve the model accuracy. Form Recognizer enables training a model to extract key-value pairs and tables using supervised learning capabilities. 
### [Form Recognizer Studio](#tab/studio) diff --git a/articles/applied-ai-services/form-recognizer/compose-custom-models.md b/articles/applied-ai-services/form-recognizer/compose-custom-models.md index 2982ab482a60..3e508fa91cb7 100644 --- a/articles/applied-ai-services/form-recognizer/compose-custom-models.md +++ b/articles/applied-ai-services/form-recognizer/compose-custom-models.md @@ -74,7 +74,7 @@ You [train your model](./quickstarts/try-sdk-rest-api.md#train-a-custom-model) When you train with labeled data, the model uses supervised learning to extract values of interest, using the labeled forms you provide. Labeled data results in better-performing models and can produce models that work with complex forms or forms containing values without keys. -Form Recognizer uses the [Layout](concept-layout.md) API to learn the expected sizes and positions of printed and handwritten text elements and extract tables. Then it uses user-specified labels to learn the key/value associations and tables in the documents. We recommend that you use five manually labeled forms of the same type (same structure) to get started when training a new model and add more labeled data as needed to improve the model accuracy. Form Recognizer enables training a model to extract key value pairs and tables using supervised learning capabilities. +Form Recognizer uses the [Layout](concept-layout.md) API to learn the expected sizes and positions of typeface and handwritten text elements and extract tables. Then it uses user-specified labels to learn the key/value associations and tables in the documents. We recommend that you use five manually labeled forms of the same type (same structure) to get started when training a new model and add more labeled data as needed to improve the model accuracy. Form Recognizer enables training a model to extract key value pairs and tables using supervised learning capabilities. 
[Get started with Train with labels](label-tool.md) diff --git a/articles/applied-ai-services/form-recognizer/concept-layout.md b/articles/applied-ai-services/form-recognizer/concept-layout.md index 69226aad645a..18b2f6d09852 100644 --- a/articles/applied-ai-services/form-recognizer/concept-layout.md +++ b/articles/applied-ai-services/form-recognizer/concept-layout.md @@ -103,7 +103,7 @@ You'll need a form document. You can use our [sample form document](https://raw. ## Data extraction -The layout model extracts table structures, selection marks, printed and handwritten text, and bounding box coordinates from your documents. +The layout model extracts table structures, selection marks, typeface and handwritten text, and bounding box coordinates from your documents. ### Tables and table headers @@ -119,7 +119,7 @@ Layout API also extracts selection marks from documents. Extracted selection mar ### Text lines and words -The layout model extracts text from documents and images with multiple text angles and colors. It accepts photos of documents, faxes, printed and/or handwritten (English only) text, and mixed modes. Printed and handwritten text is extracted from lines and words. The service then returns bounding box coordinates, confidence scores, and style (handwritten or other). All the text information is included in the `readResults` section of the JSON output. +The layout model extracts text from documents and images with multiple text angles and colors. It accepts photos of documents, faxes, printed and/or handwritten (English only) text, and mixed modes. Typeface and handwritten text is extracted from lines and words. The service then returns bounding box coordinates, confidence scores, and style (handwritten or other). All the text information is included in the `readResults` section of the JSON output. 
:::image type="content" source="./media/layout-text-extraction.png" alt-text="Layout text extraction output"::: diff --git a/articles/applied-ai-services/form-recognizer/concept-model-overview.md b/articles/applied-ai-services/form-recognizer/concept-model-overview.md index 7522e18a8b2e..072b681a918b 100644 --- a/articles/applied-ai-services/form-recognizer/concept-model-overview.md +++ b/articles/applied-ai-services/form-recognizer/concept-model-overview.md @@ -23,7 +23,7 @@ Azure Form Recognizer prebuilt models enable you to add intelligent document pro | **Model** | **Description** | | --- | --- | |**Document analysis**|| -| 🆕[Read (preview)](#read-preview) | Extract printed and handwritten text lines, words, locations, and detected languages.| +| 🆕[Read (preview)](#read-preview) | Extract typeface and handwritten text lines, words, locations, and detected languages.| | 🆕[General document (preview)](#general-document-preview) | Extract text, tables, structure, key-value pairs, and named entities.| | [Layout](#layout) | Extract text and layout information from documents.| |**Prebuilt**|| diff --git a/articles/applied-ai-services/form-recognizer/concept-read.md b/articles/applied-ai-services/form-recognizer/concept-read.md index 69bd79ed424c..f206120f9543 100644 --- a/articles/applied-ai-services/form-recognizer/concept-read.md +++ b/articles/applied-ai-services/form-recognizer/concept-read.md @@ -15,7 +15,7 @@ ms.custom: ignite-fall-2021 # Form Recognizer read model -The Form Recognizer v3.0 preview includes the new Read OCR model. Form Recognizer Read builds on the success of Computer Vision Read and optimizes even more for analyzing documents, including new document formats in the future. It extracts printed and handwritten text from documents and images and can handle mixed languages in the documents and text line. The read model can detect lines, words, locations, and additionally detect languages. 
It is the foundational technology powering the text extraction in Form Recognizer Layout, prebuilt, general document, and custom models. +Form Recognizer v3.0 preview includes the new Read API model. The read model extracts typeface and handwritten text including mixed languages in documents. The read model can detect lines, words, locations, and languages and is the core of all the other Form Recognizer models. Layout, general document, custom, and prebuilt models all use the read model as a foundation for extracting texts from documents. ## Development options diff --git a/articles/applied-ai-services/form-recognizer/create-sas-tokens.md b/articles/applied-ai-services/form-recognizer/create-sas-tokens.md new file mode 100644 index 000000000000..bd7961793a6b --- /dev/null +++ b/articles/applied-ai-services/form-recognizer/create-sas-tokens.md @@ -0,0 +1,181 @@ +--- +title: Create SAS tokens for containers and blobs with the Azure portal +description: Learn how to create shared access signature (SAS) tokens for containers using the Azure portal or Azure Storage Explorer +ms.topic: how-to +author: laujan +manager: nitinme +ms.service: applied-ai-services +ms.subservice: forms-recognizer +ms.date: 05/27/2022 +ms.author: lajanuar +recommendations: false +--- + +# Create SAS tokens for storage containers + + In this article, you'll learn how to create user delegation shared access signature (SAS) tokens, using the Azure portal or Azure Storage Explorer. User delegation SAS tokens are secured with Azure AD credentials. SAS tokens provide secure, delegated access to resources in your Azure storage account. + +At a high level, here's how SAS tokens work: + +* Your application submits the SAS token to Azure Storage as part of a REST API request. + +* If the storage service verifies that the SAS is valid, the request is authorized. + +* If the SAS token is deemed invalid, the request is declined and the error code 403 (Forbidden) is returned. 
+ +Azure Blob Storage offers three resource types: + +* **Storage** accounts provide a unique namespace in Azure for your data. +* **Data storage containers** are located in storage accounts and organize sets of blobs. +* **Blobs** are located in containers and store text and binary data such as files, text, and images. + +## When to use a SAS token + +* **Training custom models**. Your assembled set of training documents *must* be uploaded to an Azure Blob Storage container. You can opt to use a SAS token to grant access to your training documents. + +* **Using storage containers with public access**. You can opt to use a SAS token to grant limited access to your storage resources that have public read access. + + > [!IMPORTANT] + > + > * If your Azure storage account is protected by a virtual network or firewall, you can't grant access with a SAS token. You'll have to use a [managed identity](managed-identities.md) to grant access to your storage resource. + > + > * [Managed identity](managed-identities-secured-access.md) supports both privately and publicly accessible Azure Blob Storage accounts. + > + > * SAS tokens grant permissions to storage resources, and should be protected in the same manner as an account key. + > + > * Operations that use SAS tokens should be performed only over an HTTPS connection, and SAS URIs should only be distributed on a secure connection such as HTTPS. + +## Prerequisites + +To get started, you'll need: + +* An active [Azure account](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [create a free account](https://azure.microsoft.com/free/). + +* A [Form Recognizer](https://portal.azure.com/#create/Microsoft.CognitiveServicesFormRecognizer) or [Cognitive Services multi-service](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) resource. + +* A **standard performance** [Azure Blob Storage account](https://portal.azure.com/#create/Microsoft.StorageAccount-ARM). 
You'll create containers to store and organize your blob data within your storage account. If you don't know how to create an Azure storage account with a storage container, follow these quickstarts: + + * [Create a storage account](../../storage/common/storage-account-create.md). When you create your storage account, select **Standard** performance in the **Instance details** > **Performance** field. + * [Create a container](../../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container). When you create your container, set **Public access level** to **Container** (anonymous read access for containers and blobs) in the **New Container** window. + +## Upload your documents + +1. Go to the [Azure portal](https://portal.azure.com/#home). + * Select **Your storage account** → **Data storage** → **Containers**. + + :::image type="content" source="media/sas-tokens/data-storage-menu.png" alt-text="Screenshot that shows the Data storage menu in the Azure portal."::: + +1. Select a container from the list. + +1. Select **Upload** from the menu at the top of the page. + + :::image type="content" source="media/sas-tokens/container-upload-button.png" alt-text="Screenshot that shows the container Upload button in the Azure portal."::: + +1. The **Upload blob** window will appear. Select your files to upload. + + :::image type="content" source="media/sas-tokens/upload-blob-window.png" alt-text="Screenshot that shows the Upload blob window in the Azure portal."::: + + > [!NOTE] + > By default, the REST API uses form documents located at the root of your container. You can also use data organized in subfolders if specified in the API call. For more information, see [Organize your data in subfolders](./build-training-data-set.md#organize-your-data-in-subfolders-optional). + +## Use the Azure portal + +The Azure portal is a web-based console that enables you to manage your Azure subscription and resources using a graphical user interface (GUI). + +1. 
Go to the [Azure portal](https://portal.azure.com/#home) and navigate as follows: + + * **Your storage account** → **containers** → **your container**. + +1. Select **Generate SAS** from the menu near the top of the page. + +1. Select **Signing method** → **User delegation key**. + +1. Define **Permissions** by selecting or clearing the appropriate checkbox.
    + + * Make sure the **Read**, **Write**, **Delete**, and **List** permissions are selected. + + :::image type="content" source="media/sas-tokens/sas-permissions.png" alt-text="Screenshot that shows the SAS permission fields in the Azure portal."::: + + >[!IMPORTANT] + > + > * If you receive a message similar to the following one, you'll also need to assign access to the blob data in your storage account: + > + > :::image type="content" source="media/sas-tokens/need-permissions.png" alt-text="Screenshot that shows the lack of permissions warning."::: + > + > * [Azure role-based access control](../../role-based-access-control/overview.md) (Azure RBAC) is the authorization system used to manage access to Azure resources. Azure RBAC helps you manage access and permissions for your Azure resources. + > * [Assign an Azure role for access to blob data](../../role-based-access-control/role-assignments-portal.md?tabs=current) to assign a role that allows for read, write, and delete permissions for your Azure storage container. *See* [Storage Blob Data Contributor](../../role-based-access-control/built-in-roles.md#storage-blob-data-contributor). + +1. Specify the signed key **Start** and **Expiry** times. + + * When you create a SAS token, the default duration is 48 hours. After 48 hours, you'll need to create a new token. + * Consider setting a longer duration period for the time you'll be using your storage account for Form Recognizer Service operations. + * The value for the expiry time is a maximum of seven days from the creation of the SAS token. + +1. The **Allowed IP addresses** field is optional and specifies an IP address or a range of IP addresses from which to accept requests. If the request IP address doesn't match the IP address or address range specified on the SAS token, it won't be authorized. + +1. The **Allowed protocols** field is optional and specifies the protocol permitted for a request made with the SAS token. The default value is HTTPS. + +1. 
Select **Generate SAS token and URL**. + +1. The **Blob SAS token** query string and **Blob SAS URL** appear in the lower area of the window. To use the Blob SAS token, append it to a storage service URI. + +1. Copy and paste the **Blob SAS token** and **Blob SAS URL** values in a secure location. They're displayed only once and can't be retrieved after the window is closed. + +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. + +## Use Azure Storage Explorer + +Azure Storage Explorer is a free standalone app that enables you to easily manage your Azure cloud storage resources from your desktop. + +### Get started + +* You'll need the [**Azure Storage Explorer**](../../vs-azure-tools-storage-manage-with-storage-explorer.md) app installed in your Windows, macOS, or Linux development environment. + +* After the Azure Storage Explorer app is installed, [connect it to the storage account](../../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#connect-to-a-storage-account-or-service) you're using for Form Recognizer. + +### Create your SAS tokens + +1. Open the Azure Storage Explorer app on your local machine and navigate to your connected **Storage Accounts**. +1. Expand the Storage Accounts node and select **Blob Containers**. +1. Expand the Blob Containers node and right-click a storage **container** node to display the options menu. +1. Select **Get Shared Access Signature** from the options menu. +1. In the **Shared Access Signature** window, make the following selections: + * Select your **Access policy** (the default is none). + * Specify the signed key **Start** and **Expiry** date and time. A short lifespan is recommended because, once generated, a SAS can't be revoked. + * Select the **Time zone** for the Start and Expiry date and time (default is Local). + * Define your container **Permissions** by selecting the **Read**, **Write**, **List**, and **Delete** checkboxes. 
+ * Select **key1** or **key2**. + * Review and select **Create**. + +1. A new window will appear with the **Container** name, **SAS URL**, and **Query string** for your container. + +1. **Copy and paste the SAS URL and query string values in a secure location. They'll only be displayed once and can't be retrieved once the window is closed.** + +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. + +## Use your SAS URL to grant access + +The SAS URL includes a special set of [query parameters](/rest/api/storageservices/create-user-delegation-sas#assign-permissions-with-rbac). Those parameters indicate how the resources may be accessed by the client. + +### REST API + +To use your SAS URL with the [REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/TrainCustomModelAsync), add the SAS URL to the request body: + + ```json + { + "source":"" + } + ``` + +### Sample Labeling Tool + +To use your SAS URL with the [Form Recognizer labeling tool](https://fott-2-1.azurewebsites.net/connections/create), add the SAS URL to the **Connection Settings** → **Azure blob container** → **SAS URI** field: + + :::image type="content" source="media/sas-tokens/fott-add-sas-uri.png" alt-text="Screenshot that shows the SAS URI field."::: + +That's it! You've learned how to create SAS tokens to authorize how clients access your data. 
+ +## Next step + +> [!div class="nextstepaction"] +> [Build a training data set](build-training-data-set.md) diff --git a/articles/applied-ai-services/form-recognizer/faq.yml b/articles/applied-ai-services/form-recognizer/faq.yml index 907c43422345..f07c3de1deb0 --- a/articles/applied-ai-services/form-recognizer/faq.yml +++ b/articles/applied-ai-services/form-recognizer/faq.yml @@ -72,7 +72,7 @@ sections: Azure Form Recognizer is a cloud-based Azure Applied AI Service that is built using optical character recognition (OCR), Text Analytics, and Custom Text from Azure Cognitive Services. - OCR is used to extract text from printed and handwritten documents. + OCR is used to extract typeface and handwritten text from documents. Form Recognizer uses OCR to detect and extract information from forms and documents supported by AI to provide more structure and information to the text extraction. diff --git a/articles/applied-ai-services/form-recognizer/generate-sas-tokens.md b/articles/applied-ai-services/form-recognizer/generate-sas-tokens.md deleted file mode 100644 index 63305541f6cd..000000000000 --- a/articles/applied-ai-services/form-recognizer/generate-sas-tokens.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: Generate SAS tokens for containers and blobs with the Azure portal -description: Learn how to generate shared access signature (SAS) tokens for containers and blobs in the Azure portal. -ms.topic: how-to -author: laujan -manager: nitinme -ms.service: applied-ai-services -ms.subservice: forms-recognizer -ms.date: 09/23/2021 -ms.author: lajanuar -recommendations: false ---- - -# Generate SAS tokens for storage containers - -In this article, you'll learn how to generate user delegation shared access signature (SAS) tokens for Azure Blob Storage containers. A user delegation SAS token is signed with Azure Active Directory (Azure AD) credentials instead of Azure Storage keys. 
It provides superior secure and delegated access to resources in your Azure storage account. - -At a high level, here's how it works: your application provides the SAS token to Azure Storage as part of a request. If the storage service verifies that the shared access signature is valid, the request is authorized. If the shared access signature is considered invalid, the request is declined with error code 403 (Forbidden). - -Azure Blob Storage offers three types of resources: - -* **Storage** accounts provide a unique namespace in Azure for your data. -* **Containers** are located in storage accounts and organize sets of blobs. -* **Blobs** are located in containers and store text and binary data. - -> [!NOTE] -> -> * If your Azure storage account is protected by a virtual network or firewall, you can't grant access by using a SAS token. You'll have to use a [managed identity](managed-identity-byos.md) to grant access to your storage resource. -> * [Managed identity](managed-identity-byos.md) supports both privately and publicly accessible Azure Blob Storage accounts. -> - -## When to use a shared access signature - -* If you're using storage containers with public access, you can opt to use a SAS token to grant limited access to your storage resources. -* When you're training a custom model, your assembled set of training documents *must* be uploaded to an Azure Blob Storage container. You can grant permission to your training resources with a user delegation SAS token. - -## Prerequisites - -To get started, you'll need: - -* An active [Azure account](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [create a free account](https://azure.microsoft.com/free/). -* A [Form Recognizer](https://portal.azure.com/#create/Microsoft.CognitiveServicesFormRecognizer) or [Cognitive Services multi-service](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) resource. 
-* A **standard performance** [Azure Blob Storage account](https://portal.azure.com/#create/Microsoft.StorageAccount-ARM). You'll create containers to store and organize your blob data within your storage account. If you don't know how to create an Azure storage account with a container, following these quickstarts: - - * [Create a storage account](../../storage/common/storage-account-create.md). When you create your storage account, select **Standard** performance in the **Instance details** > **Performance** field. - * [Create a container](../../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container). When you create your container, set **Public access level** to **Container** (anonymous read access for containers and blobs) in the **New Container** window. - -## Upload your documents - -1. Go to the [Azure portal](https://portal.azure.com/#home). Select **Your storage account** > **Data storage** > **Containers**. - - :::image type="content" source="media/sas-tokens/data-storage-menu.png" alt-text="Screenshot that shows the Data storage menu in the Azure portal."::: - -1. Select a container from the list. -1. Select **Upload** from the menu at the top of the page. - - :::image type="content" source="media/sas-tokens/container-upload-button.png" alt-text="Screenshot that shows the container Upload button in the Azure portal."::: - - The **Upload blob** window appears. -1. Select your files to upload. - - :::image type="content" source="media/sas-tokens/upload-blob-window.png" alt-text="Screenshot that shows the Upload blob window in the Azure portal."::: - -> [!NOTE] -> By default, the REST API uses form documents located at the root of your container. You can also use data organized in subfolders if specified in the API call. For more information, see [Organize your data in subfolders](./build-training-data-set.md#organize-your-data-in-subfolders-optional). 
- -## Create a shared access signature with the Azure portal - -> [!IMPORTANT] -> -> Generate and retrieve the shared access signature for your container, not for the storage account itself. - -1. In the [Azure portal](https://portal.azure.com/#home), select **Your storage account** > **Containers**. -1. Select a container from the list. -1. Go to the right of the main window, and select the three ellipses associated with your chosen container. -1. Select **Generate SAS** from the dropdown menu to open the **Generate SAS** window. - - :::image type="content" source="media/sas-tokens/generate-sas.png" alt-text="Screenshot that shows the Generate SAS token dropdown menu in the Azure portal."::: - -1. Select **Signing method** > **User delegation key**. - -1. Define **Permissions** by selecting or clearing the appropriate checkbox. Make sure the **Read**, **Write**, **Delete**, and **List** permissions are selected. - - :::image type="content" source="media/sas-tokens/sas-permissions.png" alt-text="Screenshot that shows the SAS permission fields in the Azure portal."::: - - >[!IMPORTANT] - > - > * If you receive a message similar to the following one, you'll need to assign access to the blob data in your storage account: - > - > :::image type="content" source="media/sas-tokens/need-permissions.png" alt-text="Screenshot that shows the lack of permissions warning."::: - > - > * [Azure role-based access control](../../role-based-access-control/overview.md) (Azure RBAC) is the authorization system used to manage access to Azure resources. Azure RBAC helps you manage access and permissions for your Azure resources. - > * [Assign an Azure role for access to blob data](../../role-based-access-control/role-assignments-portal.md?tabs=current) shows you how to assign a role that allows for read, write, and delete permissions for your Azure storage container. 
For example, see [Storage Blob Data Contributor](../../role-based-access-control/built-in-roles.md#storage-blob-data-contributor). - -1. Specify the signed key **Start** and **Expiry** times. The value for the expiry time is a maximum of seven days from the start of the shared access signature. - -1. The **Allowed IP addresses** field is optional and specifies an IP address or a range of IP addresses from which to accept requests. If the request IP address doesn't match the IP address or address range specified on the SAS token, it won't be authorized. - -1. The **Allowed protocols** field is optional and specifies the protocol permitted for a request made with the shared access signature. The default value is HTTPS. - -1. Select **Generate SAS token and URL**. - -1. The **Blob SAS token** query string and **Blob SAS URL** appear in the lower area of the window. To use the Blob SAS token, append it to a storage service URI. - -1. Copy and paste the **Blob SAS token** and **Blob SAS URL** values in a secure location. They're displayed only once and can't be retrieved after the window is closed. - -## Create a shared access signature with the Azure CLI - -1. To create a user delegation SAS for a container by using the Azure CLI, make sure that you've installed version 2.0.78 or later. To check your installed version, use the `az --version` command. - -1. Call the [az storage container generate-sas](/cli/azure/storage/container#az-storage-container-generate-sas) command. - -1. The following parameters are required: - - * `auth-mode login`. This parameter ensures that requests made to Azure Storage are authorized with your Azure AD credentials. - * `as-user`. This parameter indicates that the generated SAS is a user delegation SAS. - -1. Supported permissions for a user delegation SAS on a container include Add (a), Create (c), Delete (d), List (l), Read (r), and Write (w). Make sure **r**, **w**, **d**, and **l** are included as part of the permissions parameters. 
- -1. When you create a user delegation SAS with the Azure CLI, the maximum interval during which the user delegation key is valid is seven days from the start date. Specify an expiry time for the shared access signature that's within seven days of the start time. For more information, see [Create a user delegation SAS for a container or blob with the Azure CLI](../../storage/blobs/storage-blob-user-delegation-sas-create-cli.md#use-azure-ad-credentials-to-secure-a-sas). - -### Example - -Generate a user delegation SAS. Replace the placeholder values in the brackets with your own values: - -```azurecli-interactive -az storage container generate-sas \ - --account-name \ - --name \ - --permissions rwdl \ - --expiry \ - --auth-mode login \ - --as-user -``` - -## Use your Blob SAS URL - -Two options are available: - -* To use your Blob SAS URL with the [REST API](https://westus.dev.cognitive.microsoft.com/docs/services/form-recognizer-api-v2-1/operations/TrainCustomModelAsync), add the SAS URL to the request body: - - ```json - { - "source":"" - } - ``` - -* To use your Blob SAS URL with the [Form Recognizer labeling tool](https://fott-2-1.azurewebsites.net/connections/create), add the SAS URL to the **Connection Settings** > **Azure blob container** > **SAS URI** field: - - :::image type="content" source="media/sas-tokens/fott-add-sas-uri.png" alt-text="Screenshot that shows the SAS URI field."::: - -That's it. You've learned how to generate SAS tokens to authorize how clients access your data. 
- -## Next step - -> [!div class="nextstepaction"] -> [Build a training data set](build-training-data-set.md) \ No newline at end of file diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/csharp-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/csharp-read.md index 45bfd24f0a69..b895b3d93559 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/csharp-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/csharp-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with C#/.NET programming language" -description: Use the Form Recognizer prebuilt-read model and C# to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and C# to extract printed (typeface) and handwritten text from documents. author: laujan manager: nitinme ms.service: applied-ai-services diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/java-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/java-read.md index c4982257af45..1ee507167090 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/java-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/java-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with Java programming language" -description: Use the Form Recognizer prebuilt-read model and Java to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and Java to extract printed (typeface) and handwritten text from documents. 
author: laujan manager: nitinme ms.service: applied-ai-services diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/javascript-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/javascript-read.md index 1dc3e5affe33..da8373668309 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/javascript-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/javascript-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with JavaScript programming language" -description: Use the Form Recognizer prebuilt-read model and JavaScript to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and JavaScript to extract printed (typeface) and handwritten text from documents. author: laujan manager: nitinme ms.service: applied-ai-services diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/python-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/python-read.md index afa500a5c985..dd42ae66c6b6 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/python-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/python-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with Python programming language" -description: Use the Form Recognizer prebuilt-read model and Python to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and Python to extract printed (typeface) and handwritten text from documents. 
author: laujan manager: nitinme ms.service: applied-ai-services diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/rest-api-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/rest-api-read.md index 521275a21c7f..2ccc5803d74a 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/includes/rest-api-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/includes/rest-api-read.md @@ -1,6 +1,6 @@ --- title: "How to use the read model with the Form Recognizer REST API" -description: Use the Form Recognizer prebuilt-read model and REST API to extract printed and handwritten text from documents. +description: Use the Form Recognizer prebuilt-read model and REST API to extract printed (typeface) and handwritten text from documents. author: laujan manager: nitinme ms.service: applied-ai-services diff --git a/articles/applied-ai-services/form-recognizer/how-to-guides/use-prebuilt-read.md b/articles/applied-ai-services/form-recognizer/how-to-guides/use-prebuilt-read.md index fa0ba0418519..1a3517f6f8e9 100644 --- a/articles/applied-ai-services/form-recognizer/how-to-guides/use-prebuilt-read.md +++ b/articles/applied-ai-services/form-recognizer/how-to-guides/use-prebuilt-read.md @@ -15,7 +15,7 @@ recommendations: false # Use the Read Model - In this how-to guide, you'll learn to use Azure Form Recognizer's [read model](../concept-read.md) to extract printed and handwritten text from documents. The read model can detect lines, words, locations, and languages. You can use a programming language of your choice or the REST API. We recommend that you use the free service when you're learning the technology. Remember that the number of free pages is limited to 500 per month. + In this how-to guide, you'll learn to use Azure Form Recognizer's [read model](../concept-read.md) to extract typeface and handwritten text from documents. The read model can detect lines, words, locations, and languages. 
You can use a programming language of your choice or the REST API. We recommend that you use the free service when you're learning the technology. Remember that the number of free pages is limited to 500 per month. The read model is the core of all the other Form Recognizer models. Layout, general document, custom, and prebuilt models all use the read model as a foundation for extracting texts from documents. diff --git a/articles/applied-ai-services/form-recognizer/index.yml b/articles/applied-ai-services/form-recognizer/index.yml index d2d0a400e401..624d570266f4 100644 --- a/articles/applied-ai-services/form-recognizer/index.yml +++ b/articles/applied-ai-services/form-recognizer/index.yml @@ -85,8 +85,8 @@ landingContent: url: v3-migration-guide.md - text: Use the read model url: how-to-guides/use-prebuilt-read.md - - text: Generate SAS tokens for Azure Blob containers - url: generate-sas-tokens.md + - text: Create SAS tokens for storage containers + url: create-sas-tokens.md - text: Build a custom model (v3.0) url: how-to-guides/build-custom-model-v3.md - text: Compose custom models (v3.0) diff --git a/articles/applied-ai-services/form-recognizer/overview.md b/articles/applied-ai-services/form-recognizer/overview.md index 2676c0064275..52422122ce7f 100644 --- a/articles/applied-ai-services/form-recognizer/overview.md +++ b/articles/applied-ai-services/form-recognizer/overview.md @@ -25,7 +25,7 @@ Form Recognizer uses the following models to easily identify, extract, and analy **Document analysis models** -* [**Read model**](concept-read.md) | Extract printed and handwritten text lines, words, locations, and detected languages from documents and images. +* [**Read model**](concept-read.md) | Extract typeface and handwritten text lines, words, locations, and detected languages from documents and images. 
* [**Layout model**](concept-layout.md) | Extract text, tables, selection marks, and structure information from documents (PDF and TIFF) and images (JPG, PNG, and BMP). * [**General document model**](concept-general-document.md) | Extract key-value pairs, selection marks, and entities from documents. diff --git a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-rest-api.md b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-rest-api.md index bb1e8bcce475..342feb252e7f 100644 --- a/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-rest-api.md +++ b/articles/applied-ai-services/form-recognizer/quickstarts/try-v3-rest-api.md @@ -31,7 +31,7 @@ To learn more about Form Recognizer features and development options, visit our **Document Analysis** -* 🆕 Read—Analyze and extract printed and handwritten text lines, words, locations, and detected languages. +* 🆕 Read—Analyze and extract printed (typeface) and handwritten text lines, words, locations, and detected languages. * 🆕General document—Analyze and extract text, tables, structure, key-value pairs, and named entities. * Layout—Analyze and extract tables, lines, words, and selection marks from documents, without the need to train a model. 
diff --git a/articles/applied-ai-services/form-recognizer/rest/api/storageservices/create-user-delegation-sas b/articles/applied-ai-services/form-recognizer/rest/api/storageservices/create-user-delegation-sas new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/articles/applied-ai-services/form-recognizer/toc.yml b/articles/applied-ai-services/form-recognizer/toc.yml index dba6bde2ab60..db5be34e7607 100644 --- a/articles/applied-ai-services/form-recognizer/toc.yml +++ b/articles/applied-ai-services/form-recognizer/toc.yml @@ -48,8 +48,8 @@ items: href: how-to-guides/use-prebuilt-read.md - name: Use SDKs and the REST API (v2.1) href: how-to-guides/try-sdk-rest-api.md - - name: Create SAS tokens for Azure Blob storage - href: generate-sas-tokens.md + - name: Create SAS tokens for storage containers + href: create-sas-tokens.md - name: Custom models items: - name: Build a custom model (v2.1) diff --git a/articles/automation/automation-hrw-run-runbooks.md b/articles/automation/automation-hrw-run-runbooks.md index faedab838379..75801df52299 100644 --- a/articles/automation/automation-hrw-run-runbooks.md +++ b/articles/automation/automation-hrw-run-runbooks.md @@ -114,9 +114,9 @@ Follow the next steps to use a managed identity for Azure resources on a Hybrid For instance, a runbook with `Get-AzVM` can return all the VMs in the subscription with no call to `Connect-AzAccount`, and the user would be able to access Azure resources without having to authenticate within that runbook. You can disable context autosave in Azure PowerShell, as detailed [here](/powershell/azure/context-persistence?view=azps-7.3.2#save-azure-contexts-across-powershell-sessions). -### Use runbook authentication with Run As account +### Use runbook authentication with Hybrid Worker Credentials -Instead of having your runbook provide its own authentication to local resources, you can specify a Run As account for a Hybrid Runbook Worker group. 
To specify a Run As account, you must define a [credential asset](./shared-resources/credentials.md) that has access to local resources. These resources include certificate stores and all runbooks run under these credentials on a Hybrid Runbook Worker in the group. +Instead of having your runbook provide its own authentication to local resources, you can specify Hybrid Worker Credentials for a Hybrid Runbook Worker group. To specify a Hybrid Worker Credentials, you must define a [credential asset](./shared-resources/credentials.md) that has access to local resources. These resources include certificate stores and all runbooks run under these credentials on a Hybrid Runbook Worker in the group. - The user name for the credential must be in one of the following formats: @@ -126,16 +126,35 @@ Instead of having your runbook provide its own authentication to local resources - To use the PowerShell runbook **Export-RunAsCertificateToHybridWorker**, you need to install the Az modules for Azure Automation on the local machine. -#### Use a credential asset to specify a Run As account +#### Use a credential asset for a Hybrid Runbook Worker group -Use the following procedure to specify a Run As account for a Hybrid Runbook Worker group: +By default, the Hybrid jobs run under the context of System account. However, to run Hybrid jobs under a different credential asset, follow the steps: 1. Create a [credential asset](./shared-resources/credentials.md) with access to local resources. 1. Open the Automation account in the Azure portal. 1. Select **Hybrid Worker Groups**, and then select the specific group. -1. Select **All settings**, followed by **Hybrid worker group settings**. -1. Change the value of **Run As** from **Default** to **Custom**. +1. Select **Settings**. +1. Change the value of **Hybrid Worker credentials** from **Default** to **Custom**. 1. Select the credential and click **Save**. +1. 
If the following permissions are not assigned for Custom users, jobs might get suspended.
+Use your discretion in assigning the elevated permissions corresponding to the following registry keys/folders:
+
+**Registry path**
+
+- HKLM\SYSTEM\CurrentControlSet\Services\EventLog (read)
+- HKLM\SYSTEM\CurrentControlSet\Services\WinSock2\Parameters (full access)
+- HKLM\SOFTWARE\Microsoft\Wbem\CIMOM (full access)
+- HKLM\Software\Policies\Microsoft\SystemCertificates\Root (full access)
+- HKLM\Software\Microsoft\SystemCertificates (full access)
+- HKLM\Software\Microsoft\EnterpriseCertificates (full access)
+- HKLM\software\Microsoft\HybridRunbookWorker (full access)
+- HKLM\software\Microsoft\HybridRunbookWorkerV2 (full access)
+- HKEY_CURRENT_USER\SOFTWARE\Policies\Microsoft\SystemCertificates\Disallowed (full access)
+- HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles (full access)
+
+**Folders**
+- C:\ProgramData\AzureConnectedMachineAgent\Tokens (read)
    +- C:\Packages\Plugins\Microsoft.Azure.Automation.HybridWorker.HybridWorkerForWindows\0.1.0.18\HybridWorkerPackage\HybridWorkerAgent (full access) ## Install Run As account certificate diff --git a/articles/automation/extension-based-hybrid-runbook-worker-install.md b/articles/automation/extension-based-hybrid-runbook-worker-install.md index ea91259d254d..6e824c7fbd38 100644 --- a/articles/automation/extension-based-hybrid-runbook-worker-install.md +++ b/articles/automation/extension-based-hybrid-runbook-worker-install.md @@ -164,10 +164,10 @@ To create a hybrid worker group in the Azure portal, follow these steps: 1. From the **Basics** tab, in the **Name** text box, enter a name for your Hybrid worker group. -1. For the **Use run as credential** option: +1. For the **Use Hybrid Worker Credentials** option: - - If you select **No**, the hybrid extension will be installed using the local system account. - - If you select **Yes**, then from the drop-down list, select the credential asset. + - If you select **Default**, the hybrid extension will be installed using the local system account. + - If you select **Custom**, then from the drop-down list, select the credential asset. 1. Select **Next** to advance to the **Hybrid workers** tab. You can select Azure virtual machines or Azure Arc-enabled servers to be added to this Hybrid worker group. If you don't select any machines, an empty Hybrid worker group will be created. You can still add machines later. @@ -622,7 +622,7 @@ To install and use Hybrid Worker extension using REST API, follow these steps. T 1. Get the automation account details using this API call. 
```http - GET https://westcentralus.management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}?api-version=2021-06-22 + GET https://westcentralus.management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/HybridWorkerExtension?api-version=2021-06-22 ``` @@ -631,7 +631,7 @@ To install and use Hybrid Worker extension using REST API, follow these steps. T 1. Install the Hybrid Worker Extension on Azure VM by using the following API call. ```http - PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}?api-version=2021-11-01 + PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/HybridWorkerExtension?api-version=2021-11-01 ``` diff --git a/articles/azure-arc/data/active-directory-introduction.md b/articles/azure-arc/data/active-directory-introduction.md index 4e2aa81f5f33..badf4919ec5e 100644 --- a/articles/azure-arc/data/active-directory-introduction.md +++ b/articles/azure-arc/data/active-directory-introduction.md @@ -12,8 +12,11 @@ ms.topic: how-to --- # Azure Arc-enabled SQL Managed Instance with Active Directory authentication + Azure Arc-enabled data services support Active Directory (AD) for Identity and Access Management (IAM). The Arc-enabled SQL Managed Instance uses an existing on-premises Active Directory (AD) domain for authentication. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + This article describes how to enable Azure Arc-enabled SQL Managed Instance with Active Directory (AD) Authentication. 
The article demonstrates two possible AD integration modes: - Customer-managed keytab (CMK) - System-managed keytab (SMK) diff --git a/articles/azure-arc/data/active-directory-prerequisites.md b/articles/azure-arc/data/active-directory-prerequisites.md index 97abd3782696..fafd2be89973 100644 --- a/articles/azure-arc/data/active-directory-prerequisites.md +++ b/articles/azure-arc/data/active-directory-prerequisites.md @@ -15,6 +15,8 @@ ms.topic: how-to This document explains how to prepare to deploy Azure Arc-enabled data services with Active Directory (AD) authentication. Specifically the article describes Active Directory objects you need to configure before the deployment of Kubernetes resources. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + [The introduction](active-directory-introduction.md#compare-ad-integration-modes) describes two different integration modes: - *System-managed keytab* mode allows the system to create and manage the AD accounts for each SQL Managed Instance. - *Customer-managed keytab* mode allows you to create and manage the AD accounts for each SQL Managed Instance. diff --git a/articles/azure-arc/data/configure-managed-instance.md b/articles/azure-arc/data/configure-managed-instance.md index e60fcc4ca9cd..fd08daf42e3e 100644 --- a/articles/azure-arc/data/configure-managed-instance.md +++ b/articles/azure-arc/data/configure-managed-instance.md @@ -7,7 +7,7 @@ ms.subservice: azure-arc-data author: dnethi ms.author: dinethi ms.reviewer: mikeray -ms.date: 02/22/2022 +ms.date: 05/27/2022 ms.topic: how-to --- @@ -45,6 +45,39 @@ To view the changes made to the Azure Arc-enabled SQL managed instance, you can az sql mi-arc show -n --k8s-namespace --use-k8s ``` +## Configure readable secondaries + +When you deploy Azure Arc enabled SQL managed instance in `BusinessCritical` service tier with 2 or more replicas, by default, one secondary replica is automatically configured as `readableSecondary`. 
This setting can be changed, either to add or to remove the readable secondaries as follows:
+
+```azurecli
+az sql mi-arc update --name <name of instance> --readable-secondaries <value> --k8s-namespace <namespace> --use-k8s
+```
+
+For example, the following example will reset the readable secondaries to 0.
+
+```azurecli
+az sql mi-arc update --name sqlmi1 --readable-secondaries 0 --k8s-namespace mynamespace --use-k8s
+```
+## Configure replicas
+
+You can also scale up or down the number of replicas deployed in the `BusinessCritical` service tier as follows:
+
+```azurecli
+az sql mi-arc update --name <name of instance> --replicas <number of replicas> --k8s-namespace <namespace> --use-k8s
+```
+
+For example:
+
+The following example will scale down the number of replicas from 3 to 2.
+
+```azurecli
+az sql mi-arc update --name sqlmi1 --replicas 2 --k8s-namespace mynamespace --use-k8s
+```
+
+> [!NOTE]
+> If you scale down from 2 replicas to 1 replica, you may run into a conflict with the pre-configured `--readable-secondaries` setting. You can first edit the `--readable-secondaries` setting before scaling down the replicas.
+
+
 ## Configure Server options
 
 You can configure server configuration settings for Azure Arc-enabled SQL managed instance after creation time. This article describes how to configure settings like enabling or disabling mssql Agent, enable specific trace flags for troubleshooting scenarios.
diff --git a/articles/azure-arc/data/connect-active-directory-sql-managed-instance.md b/articles/azure-arc/data/connect-active-directory-sql-managed-instance.md
index e9d9c4902c05..cb952f4ce797 100644
--- a/articles/azure-arc/data/connect-active-directory-sql-managed-instance.md
+++ b/articles/azure-arc/data/connect-active-directory-sql-managed-instance.md
@@ -15,6 +15,8 @@ ms.topic: how-to
 
 This article describes how to connect to SQL Managed Instance endpoint using Active Directory (AD) authentication. Before you proceed, make sure you have an AD-integrated Azure Arc-enabled SQL Managed Instance deployed already. 
+[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + See [Tutorial – Deploy AD-integrated SQL Managed Instance](deploy-active-directory-sql-managed-instance.md) to deploy Azure Arc-enabled SQL Managed Instance with Active Directory authentication enabled. > [!NOTE] diff --git a/articles/azure-arc/data/create-data-controller-direct-cli.md b/articles/azure-arc/data/create-data-controller-direct-cli.md index 81599d2c97be..014983d40ec6 100644 --- a/articles/azure-arc/data/create-data-controller-direct-cli.md +++ b/articles/azure-arc/data/create-data-controller-direct-cli.md @@ -8,7 +8,7 @@ ms.reviewer: mikeray services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data -ms.date: 03/24/2022 +ms.date: 05/27/2022 ms.topic: overview --- @@ -225,18 +225,18 @@ Optionally, you can specify certificates for logs and metrics UI dashboards. See After the extension and custom location are created, proceed to deploy the Azure Arc data controller as follows. ```azurecli -az arcdata dc create --name --resource-group --location --connectivity-mode direct --profile-name --auto-upload-logs true --auto-upload-metrics true --custom-location --storage-class +az arcdata dc create --name --resource-group --location --connectivity-mode direct --profile-name --auto-upload-metrics true --custom-location --storage-class # Example -az arcdata dc create --name arc-dc1 --resource-group my-resource-group --location eastasia --connectivity-mode direct --profile-name azure-arc-aks-premium-storage --auto-upload-logs true --auto-upload-metrics true --custom-location mycustomlocation --storage-class mystorageclass +az arcdata dc create --name arc-dc1 --resource-group my-resource-group --location eastasia --connectivity-mode direct --profile-name azure-arc-aks-premium-storage --auto-upload-metrics true --custom-location mycustomlocation --storage-class mystorageclass ``` If you want to create the Azure Arc data controller using a custom configuration template, 
follow the steps described in [Create custom configuration profile](create-custom-configuration-template.md) and provide the path to the file as follows: ```azurecli -az arcdata dc create --name --resource-group --location --connectivity-mode direct --path ./azure-arc-custom --auto-upload-logs true --auto-upload-metrics true --custom-location +az arcdata dc create --name --resource-group --location --connectivity-mode direct --path ./azure-arc-custom --auto-upload-metrics true --custom-location # Example -az arcdata dc create --name arc-dc1 --resource-group my-resource-group --location eastasia --connectivity-mode direct --path ./azure-arc-custom --auto-upload-logs true --auto-upload-metrics true --custom-location mycustomlocation +az arcdata dc create --name arc-dc1 --resource-group my-resource-group --location eastasia --connectivity-mode direct --path ./azure-arc-custom --auto-upload-metrics true --custom-location mycustomlocation ``` ## Monitor the status of Azure Arc data controller deployment diff --git a/articles/azure-arc/data/deploy-active-directory-connector-cli.md b/articles/azure-arc/data/deploy-active-directory-connector-cli.md index 19e31a3fbde3..535b8389e437 100644 --- a/articles/azure-arc/data/deploy-active-directory-connector-cli.md +++ b/articles/azure-arc/data/deploy-active-directory-connector-cli.md @@ -16,6 +16,8 @@ ms.topic: how-to This article explains how to deploy an Active Directory (AD) connector using Azure CLI. The AD connector is a key component to enable Active Directory authentication on Azure Arc-enabled SQL Managed Instance. 
+[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + ## Prerequisites ### Install tools diff --git a/articles/azure-arc/data/deploy-active-directory-connector-portal.md b/articles/azure-arc/data/deploy-active-directory-connector-portal.md index 62b0f97d949b..15f734ab3f45 100644 --- a/articles/azure-arc/data/deploy-active-directory-connector-portal.md +++ b/articles/azure-arc/data/deploy-active-directory-connector-portal.md @@ -15,6 +15,8 @@ ms.topic: how-to Active Directory (AD) connector is a key component to enable Active Directory authentication on Azure Arc-enabled SQL Managed Instances. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + This article explains how to deploy, manage, and delete an Active Directory (AD) connector in directly connected mode from the Azure portal. ## Prerequisites diff --git a/articles/azure-arc/data/deploy-active-directory-sql-managed-instance-cli.md b/articles/azure-arc/data/deploy-active-directory-sql-managed-instance-cli.md index 7bd2bef7df04..e6e78d2b6fca 100644 --- a/articles/azure-arc/data/deploy-active-directory-sql-managed-instance-cli.md +++ b/articles/azure-arc/data/deploy-active-directory-sql-managed-instance-cli.md @@ -15,6 +15,8 @@ ms.topic: how-to This article explains how to deploy Azure Arc-enabled SQL Managed Instance with Active Directory (AD) authentication using Azure CLI. 
+[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + See these articles for specific instructions: - [Tutorial – Deploy AD connector in customer-managed keytab mode](deploy-customer-managed-keytab-active-directory-connector.md) diff --git a/articles/azure-arc/data/deploy-active-directory-sql-managed-instance.md b/articles/azure-arc/data/deploy-active-directory-sql-managed-instance.md index 84191fd1e79c..3b749ec7cf09 100644 --- a/articles/azure-arc/data/deploy-active-directory-sql-managed-instance.md +++ b/articles/azure-arc/data/deploy-active-directory-sql-managed-instance.md @@ -15,6 +15,8 @@ ms.topic: how-to This article explains how to deploy Azure Arc-enabled SQL Managed Instance with Active Directory (AD) authentication. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + Before you proceed, complete the steps explained in [Customer-managed keytab Active Directory (AD) connector](deploy-customer-managed-keytab-active-directory-connector.md) or [Deploy a system-managed keytab AD connector](deploy-system-managed-keytab-active-directory-connector.md) ## Prerequisites diff --git a/articles/azure-arc/data/deploy-customer-managed-keytab-active-directory-connector.md b/articles/azure-arc/data/deploy-customer-managed-keytab-active-directory-connector.md index ad7eb142aa67..a31373f2d685 100644 --- a/articles/azure-arc/data/deploy-customer-managed-keytab-active-directory-connector.md +++ b/articles/azure-arc/data/deploy-customer-managed-keytab-active-directory-connector.md @@ -15,6 +15,8 @@ ms.topic: how-to This article explains how to deploy Active Directory (AD) connector in customer-managed keytab mode. The connector is a key component to enable Active Directory authentication on Azure Arc-enabled SQL Managed Instance. 
+[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + ## Active Directory connector in customer-managed keytab mode In customer-managed keytab mode, an Active Directory connector deploys a DNS proxy service that proxies the DNS requests coming from the managed instance to either of the two upstream DNS services: diff --git a/articles/azure-arc/data/deploy-system-managed-keytab-active-directory-connector.md b/articles/azure-arc/data/deploy-system-managed-keytab-active-directory-connector.md index 1af400237870..9c10c7634630 100644 --- a/articles/azure-arc/data/deploy-system-managed-keytab-active-directory-connector.md +++ b/articles/azure-arc/data/deploy-system-managed-keytab-active-directory-connector.md @@ -16,6 +16,8 @@ ms.topic: how-to This article explains how to deploy Active Directory connector in system-managed keytab mode. It is a key component to enable Active Directory authentication on Azure Arc-enabled SQL Managed Instance. +[!INCLUDE [azure-arc-data-preview](../../../includes/azure-arc-data-preview.md)] + ## Active Directory connector in system-managed keytab mode In System-Managed Keytab mode, an Active Directory connector deploys a DNS proxy service that proxies the DNS requests coming from the managed instance to either of the two upstream DNS services: diff --git a/articles/azure-arc/data/toc.yml b/articles/azure-arc/data/toc.yml index 56f797615543..ea19d6c4f38a 100644 --- a/articles/azure-arc/data/toc.yml +++ b/articles/azure-arc/data/toc.yml @@ -73,6 +73,8 @@ items: href: uninstall-azure-arc-data-controller.md - name: Manage items: + - name: Upload usage data + href: upload-usage-data.md - name: Monitor with Grafana & Kibana href: monitor-grafana-kibana.md - name: Upload to Azure Monitor @@ -83,8 +85,6 @@ items: href: upload-logs.md - name: Upload metrics href: upload-metrics.md - - name: Upload usage data - href: upload-usage-data.md - name: Inventory database instances href: 
view-arc-data-services-inventory-in-azure-portal.md - name: Update service principal credentials diff --git a/articles/azure-arc/data/troubleshoot-guide.md b/articles/azure-arc/data/troubleshoot-guide.md index 5a3c0d04128d..1d35158ec2ac 100644 --- a/articles/azure-arc/data/troubleshoot-guide.md +++ b/articles/azure-arc/data/troubleshoot-guide.md @@ -4,10 +4,10 @@ description: Introduction to troubleshooting resources services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data -author: twright-msft -ms.author: twright +author: dnethi +ms.author: dinethi ms.reviewer: mikeray -ms.date: 07/30/2021 +ms.date: 05/27/2022 ms.topic: how-to --- @@ -16,6 +16,60 @@ ms.topic: how-to This article identifies troubleshooting resources for Azure Arc-enabled data services. +## Logs Upload related errors + +If you deployed Azure Arc data controller in the `direct` connectivity mode using `kubectl`, and have not created a secret for the Log Analytics workspace credentials, you may see the following error messages in the Data Controller CR (Custom Resource): + +``` +status": { + "azure": { + "uploadStatus": { + "logs": { + "lastUploadTime": "YYYY-MM-HHTMM:SS:MS.SSSSSSZ", + "message": "spec.settings.azure.autoUploadLogs is true, but failed to get log-workspace-secret secret." 
+ }, + +``` + +To resolve the above error, create a secret with the Log Analytics Workspace credentials containing the `WorkspaceID` and the `SharedAccessKey` as follows: + +``` +apiVersion: v1 +data: + primaryKey: + workspaceId: +kind: Secret +metadata: + name: log-workspace-secret + namespace: +type: Opaque + +``` + +## Metrics upload related errors in direct connected mode + +If you configured automatic upload of metrics, in the direct connected mode and the permissions needed for the MSI have not been properly granted (as described in [Upload metrics](upload-metrics.md)), you might see an error in your logs as follows: + +```output +'Metric upload response: {"error":{"code":"AuthorizationFailed","message":"Check Access Denied Authorization for AD object XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX over scope /subscriptions/XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX/resourcegroups/my-resource-group/providers/microsoft.azurearcdata/sqlmanagedinstances/arc-dc, User Tenant Id: XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX. Microsoft.Insights/Metrics/write was not allowed, Microsoft.Insights/Telemetry/write was notallowed. Warning: Principal will be blocklisted if the service principal is not granted proper access while it hits the GIG endpoint continuously."}} +``` + +To resolve above error, retrieve the MSI for the Azure Arc data controller extension, and grant the required roles as described in [Upload metrics](upload-metrics.md). + + +## Usage upload related errors in direct connected mode + +If you deployed your Azure Arc data controller in the direct connected mode the permissions needed to upload your usage information are automatically granted for the Azure Arc data controller extension MSI. If the automatic upload process runs into permissions related issues you might see an error in your logs as follows: + +``` +identified that your data controller stopped uploading usage data to Azure. 
The error was: + +{"lastUploadTime":"2022-05-05T20:10:47.6746860Z","message":"Data controller upload response: {\"error\":{\"code\":\"AuthorizationFailed\",\"message\":\"The client 'XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX' with object id 'XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX' does not have authorization to perform action 'microsoft.azurearcdata/datacontrollers/write' over scope '/subscriptions/XXXXXXXXX-XXXX-XXXX-XXXXX-XXXXXXXXXXX/resourcegroups/my-resource-group/providers/microsoft.azurearcdata/datacontrollers/arc-dc' or the scope is invalid. If access was recently granted, please refresh your credentials.\"}}"} +``` + +To resolve the permissions issue, retrieve the MSI and grant the required roles as described in [Upload metrics](upload-metrics.md)). + + ## Resources by type [Scenario: Troubleshooting PostgreSQL Hyperscale server groups](troubleshoot-postgresql-hyperscale-server-group.md) diff --git a/articles/azure-arc/data/upload-usage-data.md b/articles/azure-arc/data/upload-usage-data.md index 78aeed4ccfe9..932d62fe8d50 100644 --- a/articles/azure-arc/data/upload-usage-data.md +++ b/articles/azure-arc/data/upload-usage-data.md @@ -4,16 +4,16 @@ description: Upload usage Azure Arc-enabled data services data to Azure services: azure-arc ms.service: azure-arc ms.subservice: azure-arc-data -author: twright-msft -ms.author: twright +author: dnethi +ms.author: dinethi ms.reviewer: mikeray -ms.date: 11/03/2021 +ms.date: 05/27/2022 ms.topic: how-to --- # Upload usage data to Azure in **indirect** mode -Periodically, you can export out usage information. The export and upload of this information creates and updates the data controller, SQL managed instance, and PostgreSQL Hyperscale server group resources in Azure. +Periodically, you can export out usage information. The export and upload of this information creates and updates the data controller, SQL managed instance, and PostgreSQL resources in Azure. 
> [!NOTE] > Usage information is automatically uploaded for Azure Arc data controller deployed in **direct** connectivity mode. The instructions in this article only apply to uploading usage information for Azure Arc data controller deployed in **indirect** connectivity mode.. @@ -42,12 +42,12 @@ Usage information such as inventory and resource usage can be uploaded to Azure az arcdata dc export --type usage --path usage.json --k8s-namespace --use-k8s ``` - This command creates a `usage.json` file with all the Azure Arc-enabled data resources such as SQL managed instances and PostgreSQL Hyperscale instances etc. that are created on the data controller. + This command creates a `usage.json` file with all the Azure Arc-enabled data resources such as SQL managed instances and PostgreSQL instances etc. that are created on the data controller. For now, the file is not encrypted so that you can see the contents. Feel free to open in a text editor and see what the contents look like. -You will notice that there are two sets of data: `resources` and `data`. The `resources` are the data controller, PostgreSQL Hyperscale server groups, and SQL Managed Instances. The `resources` records in the data capture the pertinent events in the history of a resource - when it was created, when it was updated, and when it was deleted. The `data` records capture how many cores were available to be used by a given instance for every hour. +You will notice that there are two sets of data: `resources` and `data`. The `resources` are the data controller, PostgreSQL, and SQL Managed Instances. The `resources` records in the data capture the pertinent events in the history of a resource - when it was created, when it was updated, and when it was deleted. The `data` records capture how many cores were available to be used by a given instance for every hour. 
Example of a `resource` entry: @@ -104,6 +104,18 @@ Example of a `data` entry: az arcdata dc upload --path usage.json ``` +## Upload frequency + +In the **indirect** mode, usage information needs to be uploaded to Azure at least once in every 30 days. It is highly recommended to upload more frequently, such as daily or weekly. If usage information is not uploaded past 32 days, you will see some degradation in the service such as being unable to provision any new resources. + +There will be two types of notifications for delayed usage uploads - warning phase and degraded phase. In the warning phase there will be a message such as `Billing data for the Azure Arc data controller has not been uploaded in {0} hours. Please upload billing data as soon as possible.`. + +In the degraded phase, the message will look like `Billing data for the Azure Arc data controller has not been uploaded in {0} hours. Some functionality will not be available until the billing data is uploaded.`. + +The Azure portal Overview page for Data Controller and the Custom Resource status of the Data controller in your kubernetes cluster will both indicate the last upload date and the status message(s). + + + ## Automating uploads (optional) If you want to upload metrics and logs on a scheduled basis, you can create a script and run it on a timer every few minutes. Below is an example of automating the uploads using a Linux shell script. diff --git a/articles/azure-arc/servers/network-requirements.md b/articles/azure-arc/servers/network-requirements.md index 5fcb05a656f0..fbf6fdfbc487 100644 --- a/articles/azure-arc/servers/network-requirements.md +++ b/articles/azure-arc/servers/network-requirements.md @@ -1,7 +1,7 @@ --- title: Connected Machine agent network requirements description: Learn about the networking requirements for using the Connected Machine agent for Azure Arc-enabled servers. 
-ms.date: 03/14/2022 +ms.date: 05/24/2022 ms.topic: conceptual --- @@ -29,6 +29,7 @@ Be sure to allow access to the following Service Tags: * AzureResourceManager * AzureArcInfrastructure * Storage +* WindowsAdminCenter (if [using Windows Admin Center to manage Arc-enabled servers](/windows-server/manage/windows-admin-center/azure/manage-arc-hybrid-machines)) For a list of IP addresses for each service tag/region, see the JSON file [Azure IP Ranges and Service Tags – Public Cloud](https://www.microsoft.com/download/details.aspx?id=56519). Microsoft publishes weekly updates containing each Azure Service and the IP ranges it uses. This information in the JSON file is the current point-in-time list of the IP ranges that correspond to each service tag. The IP addresses are subject to change. If IP address ranges are required for your firewall configuration, then the **AzureCloud** Service Tag should be used to allow access to all Azure services. Do not disable security monitoring or inspection of these URLs, allow them as you would other Internet traffic. 
@@ -51,6 +52,7 @@ The table below lists the URLs that must be available in order to install and us |`*.guestconfiguration.azure.com`| Extension management and guest configuration services |Always| Private | |`guestnotificationservice.azure.com`, `*.guestnotificationservice.azure.com`|Notification service for extension and connectivity scenarios|Always| Private | |`azgn*.servicebus.windows.net`|Notification service for extension and connectivity scenarios|Always| Public | +|`*servicebus.windows.net`|For Windows Admin Center and SSH scenarios|If using SSH or Windows Admin Center from Azure|Public| |`*.blob.core.windows.net`|Download source for Azure Arc-enabled servers extensions|Always, except when using private endpoints| Not used when private link is configured | |`dc.services.visualstudio.com`|Agent telemetry|Optional| Public | diff --git a/articles/azure-arc/servers/toc.yml b/articles/azure-arc/servers/toc.yml index 6c83c4ce5511..5aab1e49c98a 100644 --- a/articles/azure-arc/servers/toc.yml +++ b/articles/azure-arc/servers/toc.yml @@ -107,6 +107,8 @@ href: scenario-onboard-azure-sentinel.md - name: Onboard to Microsoft Defender for Cloud href: ../../defender-for-cloud/quickstart-onboard-machines.md?toc=/azure/azure-arc/servers/toc.json&bc=/azure/azure-arc/servers/breadcrumb/toc.json + - name: Manage with Windows Admin Center + href: /windows-server/manage/windows-admin-center/azure/manage-arc-hybrid-machines - name: Connect via SSH items: - name: SSH access to Azure Arc-enabled servers diff --git a/articles/azure-functions/TOC.yml b/articles/azure-functions/TOC.yml index 8a6126d2f9f7..e5a264a297f9 100644 --- a/articles/azure-functions/TOC.yml +++ b/articles/azure-functions/TOC.yml @@ -186,7 +186,7 @@ displayName: best practices - name: Compare runtime versions href: functions-versions.md - displayName: migrate, migration, v3 + displayName: migrate, migration, v3, v4, update, upgrade - name: Hosting and scale items: - name: Consumption plan diff --git 
a/articles/azure-functions/azure-functions-az-redundancy.md b/articles/azure-functions/azure-functions-az-redundancy.md index 3cd7949bd60c..18c2207d2edf 100644 --- a/articles/azure-functions/azure-functions-az-redundancy.md +++ b/articles/azure-functions/azure-functions-az-redundancy.md @@ -3,35 +3,39 @@ title: Azure Functions availability zone support on Elastic Premium plans description: Learn how to use availability zone redundancy with Azure Functions for high-availability function applications on Elastic Premium plans. ms.topic: conceptual ms.author: johnguo -ms.date: 09/07/2021 +ms.date: 03/24/2022 ms.custom: references_regions # Goal: Introduce AZ Redundancy in Azure Functions elastic premium plans to customers + a tutorial on how to get started with ARM templates --- # Azure Functions support for availability zone redundancy -Availability zone (AZ) support for Azure Functions is now available on Elastic Premium and Dedicated (App Service) plans. A Zone Redundant Azure Function application will automatically balance its instances between availability zones for higher availability. This document focuses on zone redundancy support for Elastic Premium Function plans. For zone redundancy on Dedicated plans, refer [here](../app-service/how-to-zone-redundancy.md). +Availability zone (AZ) support for Azure Functions is now available on Premium (Elastic Premium) and Dedicated (App Service) plans. A zone-redundant Functions application automatically balances its instances between availability zones for higher availability. This article focuses on zone redundancy support for Premium plans. For zone redundancy on Dedicated plans, refer [here](../app-service/how-to-zone-redundancy.md). + +[!INCLUDE [functions-premium-plan-note](../../includes/functions-premium-plan-note.md)] ## Overview -An [availability zone](../availability-zones/az-overview.md#availability-zones) is a high-availability offering that protects your applications and data from datacenter failures. 
Availability zones are unique physical locations within an Azure region. Each zone is made up of one or more datacenters equipped with independent power, cooling, and networking. To ensure resiliency, there's a minimum of three separate zones in all enabled regions. You can build high availability into your application architecture by co-locating your compute, storage, networking, and data resources within a zone and replicating in other zones. +An [availability zone](../availability-zones/az-overview.md#availability-zones) is a high-availability offering that protects your applications and data from datacenter failures. Availability zones are unique physical locations within an Azure region. Each zone comprises one or more datacenters equipped with independent power, cooling, and networking. To ensure resiliency, there's a minimum of three separate zones in all enabled regions. You can build high-availability into your application architecture by co-locating your compute, storage, networking, and data resources within a zone and replicating into other zones. -A zone redundant function app will automatically distribute load the instances that your app runs on between the availability zones in the region. For Zone Redundant Elastic Premium apps, even as the app scales in and out, the instances the app is running on are still evenly distributed between availability zones. +A zone redundant function app automatically distributes the instances your app runs on between the availability zones in the region. For apps running in a zone-redundant Premium plan, even as the app scales in and out, the instances the app is running on are still evenly distributed between availability zones. ## Requirements -> [!IMPORTANT] -> When selecting a [storage account](storage-considerations.md#storage-account-requirements) for your function app, be sure to use a [zone redundant storage account (ZRS)](../storage/common/storage-redundancy.md#zone-redundant-storage). 
Otherwise, in the case of a zonal outage, Functions may show unexpected behavior due to its dependency on Storage. +When hosting in a zone-redundant Premium plan, the following requirements must be met. +- You must use a [zone redundant storage account (ZRS)](../storage/common/storage-redundancy.md#zone-redundant-storage) for your function app's [storage account](storage-considerations.md#storage-account-requirements). If you use a different type of storage account, Functions may show unexpected behavior during a zonal outage. - Both Windows and Linux are supported. -- Must be hosted on an [Elastic Premium](functions-premium-plan.md) or Dedicated hosting plan. Instructions on zone redundancy with Dedicated (App Service) hosting plan can be found [here](../app-service/how-to-zone-redundancy.md). +- Must be hosted on an [Elastic Premium](functions-premium-plan.md) or Dedicated hosting plan. Instructions on zone redundancy with Dedicated (App Service) hosting plan can be found [in this article](../app-service/how-to-zone-redundancy.md). - Availability zone (AZ) support isn't currently available for function apps on [Consumption](consumption-plan.md) plans. -- Zone redundant plans must specify a minimum instance count of 3. -- Function apps on an Elastic Premium plan additionally must have a minimum [always ready instances](functions-premium-plan.md#always-ready-instances) count of 3. -- Can be enabled in any of the following regions: +- Zone redundant plans must specify a minimum instance count of three. +- Function apps hosted on a Premium plan must also have a minimum [always ready instances](functions-premium-plan.md#always-ready-instances) count of three. 
+ +Zone-redundant Premium plans can currently be enabled in any of the following regions: - West US 2 - West US 3 - Central US + - South Central US - East US - East US 2 - Canada Central @@ -44,48 +48,86 @@ A zone redundant function app will automatically distribute load the instances t - Japan East - Southeast Asia - Australia East -- At this time, must be created through [ARM template](../azure-resource-manager/templates/index.yml). ## How to deploy a function app on a zone redundant Premium plan -For initial creation of a zone redundant Elastic Premium Functions plan, you need to deploy via [ARM templates](../azure-resource-manager/templates/quickstart-create-templates-use-visual-studio-code.md). Then, once successfully created, you can view and interact with the Function Plan via the Azure portal and CLI tooling. An ARM template is only needed for the initial creation of the Function Plan. A guide to hosting Functions on Premium plans can be found [here](functions-infrastructure-as-code.md#deploy-on-premium-plan). Once the zone redundant plan is created and deployed, any function app hosted on your new plan will now be zone redundant. +There are currently two ways to deploy a zone-redundant premium plan and function app. You can use either the [Azure portal](https://portal.azure.com) or an ARM template. + +# [Azure portal](#tab/azure-portal) + +1. Open the Azure portal and navigate to the **Create Function App** page. Information on creating a function app in the portal can be found [here](functions-create-function-app-portal.md#create-a-function-app). + +1. In the **Basics** page, fill out the fields for your function app. Pay special attention to the fields in the table below (also highlighted in the screenshot below), which have specific requirements for zone redundancy. 
+ + | Setting | Suggested value | Notes for Zone Redundancy | + | ------------ | ---------------- | ----------- | + | **Region** | Preferred region | The subscription under which this new function app is created. You must pick a region that is AZ enabled from the [list above](#requirements). | + + ![Screenshot of Basics tab of function app create page.](./media/functions-az-redundancy\azure-functions-basics-az.png) + +1. In the **Hosting** page, fill out the fields for your function app hosting plan. Pay special attention to the fields in the table below (also highlighted in the screenshot below), which have specific requirements for zone redundancy. + + | Setting | Suggested value | Notes for Zone Redundancy | + | ------------ | ---------------- | ----------- | + | **Storage Account** | A [zone-redundant storage account](storage-considerations.md#storage-account-requirements) | As mentioned above in the [requirements](#requirements) section, we strongly recommend using a zone-redundant storage account for your zone redundant function app. | + | **Plan Type** | Functions Premium | This article details how to create a zone redundant app in a Premium plan. Zone redundancy isn't currently available in Consumption plans. Information on zone redundancy on app service plans can be found [in this article](../app-service/how-to-zone-redundancy.md). | + | **Zone Redundancy** | Enabled | This field populates the flag that determines if your app is zone redundant or not. You won't be able to select `Enabled` unless you have chosen a region supporting zone redundancy, as mentioned in step 2. | + + ![Screenshot of Hosting tab of function app create page.](./media/functions-az-redundancy\azure-functions-hosting-az.png) -The only properties to be aware of while creating a zone redundant Function plan are the new **zoneRedundant** property and the Function Plan instance count (**capacity**) fields. 
The **zoneRedundant** property must be set to **true** and the **capacity** property should be set based on the workload requirement, but no less than 3. Choosing the right capacity varies based on several factors and high availability/fault tolerance strategies. A good rule of thumb is to ensure sufficient instances for the application such that losing one zone of instances leaves sufficient capacity to handle expected load. +1. For the rest of the function app creation process, create your function app as normal. There are no fields in the rest of the creation process that affect zone redundancy. + +# [ARM template](#tab/arm-template) + +You can use an [ARM template](../azure-resource-manager/templates/quickstart-create-templates-use-visual-studio-code.md) to deploy to a zone-redundant Premium plan. A guide to hosting Functions on Premium plans can be found [here](functions-infrastructure-as-code.md#deploy-on-premium-plan). + +The only properties to be aware of while creating a zone-redundant hosting plan are the new `zoneRedundant` property and the plan's instance count (`capacity`) fields. The `zoneRedundant` property must be set to `true` and the `capacity` property should be set based on the workload requirement, but not less than `3`. Choosing the right capacity varies based on several factors and high availability/fault tolerance strategies. A good rule of thumb is to ensure sufficient instances for the application such that losing one zone of instances leaves sufficient capacity to handle expected load. > [!IMPORTANT] -> Azure function Apps hosted on an elastic premium, zone redundant Function plan must have a minimum [always ready instance](functions-premium-plan.md#always-ready-instances) count of 3. This is to enforce that a zone redundant function app always has enough instances to satisfy at least one worker per zone. 
+> Azure Functions apps hosted on an elastic premium, zone-redundant plan must have a minimum [always ready instance](functions-premium-plan.md#always-ready-instances) count of 3. This makes sure that a zone-redundant function app always has enough instances to satisfy at least one worker per zone.
+ +--- + +After the zone-redundant plan is created and deployed, any function app hosted on your new plan is considered zone-redundant. + +## Next steps + +> [!div class="nextstepaction"] +> [Improve the performance and reliability of Azure Functions](performance-reliability.md) + + diff --git a/articles/azure-functions/dotnet-isolated-process-guide.md b/articles/azure-functions/dotnet-isolated-process-guide.md index 2cc8cf6286a7..290bafcf6f26 100644 --- a/articles/azure-functions/dotnet-isolated-process-guide.md +++ b/articles/azure-functions/dotnet-isolated-process-guide.md @@ -44,7 +44,7 @@ A .NET isolated function project is basically a .NET console app project that ta + Program.cs file that's the entry point for the app. + Any code files [defining your functions](#bindings). -For complete examples, see the [.NET 6 isolated sample project](https://github.com/Azure/azure-functions-dotnet-worker/tree/main/samples/FunctionApp) and the [.NET Framework 4.8 isolated sample project](https://github.com/Azure/azure-functions-dotnet-worker/tree/main/samples/NetFxWorker). +For complete examples, see the [.NET 6 isolated sample project](https://github.com/Azure/azure-functions-dotnet-worker/tree/main/samples/FunctionApp) and the [.NET Framework 4.8 isolated sample project](https://go.microsoft.com/fwlink/p/?linkid=2197310). > [!NOTE] > To be able to publish your isolated function project to either a Windows or a Linux function app in Azure, you must set a value of `dotnet-isolated` in the remote [FUNCTIONS_WORKER_RUNTIME](functions-app-settings.md#functions_worker_runtime) application setting. To support [zip deployment](deployment-zip-push.md) and [running from the deployment package](run-functions-from-deployment-package.md) on Linux, you also need to update the `linuxFxVersion` site config setting to `DOTNET-ISOLATED|6.0`. To learn more, see [Manual version updates on Linux](set-runtime-version.md#manual-version-updates-on-linux). 
diff --git a/articles/azure-functions/durable/durable-functions-http-api.md b/articles/azure-functions/durable/durable-functions-http-api.md index 96101404f584..23e8d474e1d4 100644 --- a/articles/azure-functions/durable/durable-functions-http-api.md +++ b/articles/azure-functions/durable/durable-functions-http-api.md @@ -249,6 +249,7 @@ GET /admin/extensions/DurableTaskExtension/instances &createdTimeFrom={timestamp} &createdTimeTo={timestamp} &runtimeStatus={runtimeStatus1,runtimeStatus2,...} + &instanceIdPrefix={prefix} &showInput=[true|false] &top={integer} ``` @@ -263,6 +264,7 @@ GET /runtime/webhooks/durableTask/instances? &createdTimeFrom={timestamp} &createdTimeTo={timestamp} &runtimeStatus={runtimeStatus1,runtimeStatus2,...} + &instanceIdPrefix={prefix} &showInput=[true|false] &top={integer} ``` @@ -278,6 +280,7 @@ Request parameters for this API include the default set mentioned previously as | **`createdTimeFrom`** | Query string | Optional parameter. When specified, filters the list of returned instances that were created at or after the given ISO8601 timestamp.| | **`createdTimeTo`** | Query string | Optional parameter. When specified, filters the list of returned instances that were created at or before the given ISO8601 timestamp.| | **`runtimeStatus`** | Query string | Optional parameter. When specified, filters the list of returned instances based on their runtime status. To see the list of possible runtime status values, see the [Querying instances](durable-functions-instance-management.md) article. | +| **`instanceIdPrefix`** | Query string | Optional parameter. When specified, filters the list of returned instances to include only instances whose instance id starts with the specified prefix string. Available starting with [version 2.7.2](https://www.nuget.org/packages/Microsoft.Azure.WebJobs.Extensions.DurableTask/2.7.2) of the extension. | | **`top`** | Query string | Optional parameter. 
When specified, limits the number of instances returned by the query. | ### Response diff --git a/articles/azure-functions/durable/durable-functions-overview.md b/articles/azure-functions/durable/durable-functions-overview.md index 4a363cbef0d6..a51c93496d9f 100644 --- a/articles/azure-functions/durable/durable-functions-overview.md +++ b/articles/azure-functions/durable/durable-functions-overview.md @@ -5,6 +5,7 @@ author: cgillum ms.topic: overview ms.date: 05/24/2022 ms.author: cgillum +ms.custom: devdivchpfy22 ms.reviewer: azfuncdf #Customer intent: As a < type of user >, I want < what? > so that < why? >. --- diff --git a/articles/azure-functions/functions-add-output-binding-storage-queue-cli.md b/articles/azure-functions/functions-add-output-binding-storage-queue-cli.md index ac9465434959..aaa4f25ff234 100644 --- a/articles/azure-functions/functions-add-output-binding-storage-queue-cli.md +++ b/articles/azure-functions/functions-add-output-binding-storage-queue-cli.md @@ -118,15 +118,11 @@ mvn azure-functions:deploy # [Browser](#tab/browser) - Copy the complete **Invoke URL** shown in the output of the publish command into a browser address bar, appending the query parameter `&name=Functions`. The browser should display similar output as when you ran the function locally. - - ![The output of the function runs on Azure in a browser](./media/functions-add-output-binding-storage-queue-cli/function-test-cloud-browser.png) + Copy the complete **Invoke URL** shown in the output of the publish command into a browser address bar, appending the query parameter `&name=Functions`. The browser should display the same output as when you ran the function locally. # [curl](#tab/curl) - Run [`curl`](https://curl.haxx.se/) with the **Invoke URL**, appending the parameter `&name=Functions`. The output of the command should be the text, "Hello Functions." 
- - ![The output of the function runs on Azure using CURL](./media/functions-add-output-binding-storage-queue-cli/function-test-cloud-curl.png) + Run [`curl`](https://curl.haxx.se/) with the **Invoke URL**, appending the parameter `&name=Functions`. The output should be the same as when you ran the function locally. --- diff --git a/articles/azure-functions/functions-bindings-mobile-apps.md b/articles/azure-functions/functions-bindings-mobile-apps.md index 317f6a6fc194..71d0a45202e3 100644 --- a/articles/azure-functions/functions-bindings-mobile-apps.md +++ b/articles/azure-functions/functions-bindings-mobile-apps.md @@ -286,7 +286,7 @@ The following table explains the binding configuration properties that you set i | **name**| n/a | Name of output parameter in function signature.| |**tableName** |**TableName**|Name of the mobile app's data table| |**connection**|**MobileAppUriSetting**|The name of an app setting that has the mobile app's URL. The function uses this URL to construct the required REST operations against your mobile app. Create an app setting in your function app that contains the mobile app's URL, then specify the name of the app setting in the `connection` property in your input binding. The URL looks like `http://.azurewebsites.net`. -|**apiKey**|**ApiKeySetting**|The name of an app setting that has your mobile app's API key. Provide the API key if you implement an API key in your Node.js mobile app backend, or [implement an API key in your .NET mobile app backend](https://github.com/Azure/azure-mobile-apps-net-server/wiki/Implementing-Application-Key). To provide the key, create an app setting in your function app that contains the API key, then add the `apiKey` property in your input binding with the name of the app setting. | +|**apiKey**|**ApiKeySetting**|The name of an app setting that has your mobile app's API key. 
Provide the API key if you implement an API key in your Node.js mobile app backend, or implement an API key in your .NET mobile app backend. To provide the key, create an app setting in your function app that contains the API key, then add the `apiKey` property in your input binding with the name of the app setting. | [!INCLUDE [app settings to local.settings.json](../../includes/functions-app-settings-local.md)] diff --git a/articles/azure-functions/functions-overview.md b/articles/azure-functions/functions-overview.md index d03ea3c61a98..855ed1a6265b 100644 --- a/articles/azure-functions/functions-overview.md +++ b/articles/azure-functions/functions-overview.md @@ -4,9 +4,9 @@ description: Learn how Azure Functions can help build robust serverless apps. author: craigshoemaker ms.assetid: 01d6ca9f-ca3f-44fa-b0b9-7ffee115acd4 ms.topic: overview -ms.date: 11/20/2020 +ms.date: 05/27/2022 ms.author: cshoe -ms.custom: contperf-fy21q2 +ms.custom: contperf-fy21q2, devdivchpfy22 --- # Introduction to Azure Functions diff --git a/articles/azure-functions/functions-premium-plan.md b/articles/azure-functions/functions-premium-plan.md index 81f2057f9edb..ac8c6063794e 100644 --- a/articles/azure-functions/functions-premium-plan.md +++ b/articles/azure-functions/functions-premium-plan.md @@ -12,8 +12,7 @@ ms.custom: references_regions, fasttrack-edit, devx-track-azurecli The Azure Functions Elastic Premium plan is a dynamic scale hosting option for function apps. For other hosting plan options, see the [hosting plan article](functions-scale.md). ->[!IMPORTANT] ->Azure Functions runs on the Azure App Service platform. In the App Service platform, plans that host Premium plan function apps are referred to as *Elastic* Premium plans, with SKU names like `EP1`. If you choose to run your function app on a Premium plan, make sure to create a plan with an SKU name that starts with "E", such as `EP1`. 
App Service plan SKU names that start with "P", such as `P1V2` (Premium V2 Small plan), are actually [Dedicated hosting plans](dedicated-plan.md). Because they are Dedicated and not Elastic Premium, plans with SKU names starting with "P" won't scale dynamically and may increase your costs. +[!INCLUDE [functions-premium-plan-note](../../includes/functions-premium-plan-note.md)] Premium plan hosting provides the following benefits to your functions: diff --git a/articles/azure-functions/functions-reference-python.md b/articles/azure-functions/functions-reference-python.md index b28acc36149d..92d4478523c3 100644 --- a/articles/azure-functions/functions-reference-python.md +++ b/articles/azure-functions/functions-reference-python.md @@ -2,9 +2,9 @@ title: Python developer reference for Azure Functions description: Understand how to develop functions with Python ms.topic: article -ms.date: 05/19/2022 +ms.date: 05/25/2022 ms.devlang: python -ms.custom: devdivchpfy22 +ms.custom: devx-track-python, devdivchpfy22 --- # Azure Functions Python developer guide @@ -15,7 +15,7 @@ As a Python developer, you may also be interested in one of the following articl | Getting started | Concepts| Scenarios/Samples | |--|--|--| -|
    • [Python function using Visual Studio Code](./create-first-function-vs-code-python.md)
    • [Python function with terminal/command prompt](./create-first-function-cli-python.md)
    |
    • [Developer guide](functions-reference.md)
    • [Hosting options](functions-scale.md)
    • [Performance considerations](functions-best-practices.md)
    |
    • [Image classification with PyTorch](machine-learning-pytorch.md)
    • [Azure automation sample](/samples/azure-samples/azure-functions-python-list-resource-groups/azure-functions-python-sample-list-resource-groups/)
    • [Machine learning with TensorFlow](functions-machine-learning-tensorflow.md)
    • [Browse Python samples](/samples/browse/?products=azure-functions&languages=python)
    | +|
    • [Python function using Visual Studio Code](./create-first-function-vs-code-python.md)
    • [Python function with terminal/command prompt](./create-first-function-cli-python.md)
    |
    • [Developer guide](functions-reference.md)
    • [Hosting options](functions-scale.md)
    • [Performance considerations](functions-best-practices.md)
    |
    • [Image classification with PyTorch](machine-learning-pytorch.md)
    • [Azure Automation sample](/samples/azure-samples/azure-functions-python-list-resource-groups/azure-functions-python-sample-list-resource-groups/)
    • [Machine learning with TensorFlow](functions-machine-learning-tensorflow.md)
    • [Browse Python samples](/samples/browse/?products=azure-functions&languages=python)
    | > [!NOTE] > While you can [develop your Python based Azure Functions locally on Windows](create-first-function-vs-code-python.md#run-the-function-locally), Python is only supported on a Linux based hosting plan when running in Azure. See the list of supported [operating system/runtime](functions-scale.md#operating-systemruntime) combinations. @@ -345,7 +345,7 @@ Likewise, you can set the `status_code` and `headers` for the response message i ## Web frameworks -You can apply WSGI and ASGI-compatible frameworks such as Flask and FastAPI with your HTTP-triggered Python functions. This section shows how to modify your functions to support these frameworks. +You can use WSGI and ASGI-compatible frameworks such as Flask and FastAPI with your HTTP-triggered Python functions. This section shows how to modify your functions to support these frameworks. First, the function.json file must be updated to include a `route` in the HTTP trigger, as shown in the following example: @@ -538,9 +538,9 @@ The runtime uses the available Python version, when you run it locally. To set a Python function app to a specific language version, you need to specify the language and the version of the language in `LinuxFxVersion` field in site config. For example, to change Python app to use Python 3.8, set `linuxFxVersion` to `python|3.8`. -To learn more about Azure Functions runtime support policy, refer [article](./language-support-policy.md). +To learn more about Azure Functions runtime support policy, refer to this [article](./language-support-policy.md) -To see the full list of supported Python versions functions apps, refer [article](./supported-languages.md). 
+To see the full list of supported Python versions functions apps, refer to this [article](./supported-languages.md) # [Azure CLI](#tab/azurecli-linux) diff --git a/articles/azure-functions/functions-scale.md b/articles/azure-functions/functions-scale.md index 76b4ed5f26e1..56d880741645 100644 --- a/articles/azure-functions/functions-scale.md +++ b/articles/azure-functions/functions-scale.md @@ -3,9 +3,9 @@ title: Azure Functions scale and hosting description: Learn how to choose between Azure Functions Consumption plan and Premium plan. ms.assetid: 5b63649c-ec7f-4564-b168-e0a74cb7e0f3 ms.topic: conceptual -ms.date: 03/24/2022 +ms.date: 04/22/2022 -ms.custom: H1Hack27Feb2017 +ms.custom: H1Hack27Feb2017,devdivchpfy22 --- # Azure Functions hosting options @@ -30,7 +30,7 @@ The following is a summary of the benefits of the three main hosting plans for F | Plan | Benefits | | --- | --- | |**[Consumption plan]**| Scale automatically and only pay for compute resources when your functions are running.

    On the Consumption plan, instances of the Functions host are dynamically added and removed based on the number of incoming events.

    ✔ Default hosting plan.
    ✔ Pay only when your functions are running.
    ✔ Scales automatically, even during periods of high load.| -|**[Premium plan]**|Automatically scales based on demand using pre-warmed workers which run applications with no delay after being idle, runs on more powerful instances, and connects to virtual networks.

    Consider the Azure Functions Premium plan in the following situations:

    ✔ Your function apps run continuously, or nearly continuously.
    ✔ You have a high number of small executions and a high execution bill, but low GB seconds in the Consumption plan.
    ✔ You need more CPU or memory options than what is provided by the Consumption plan.
    ✔ Your code needs to run longer than the maximum execution time allowed on the Consumption plan.
    ✔ You require features that aren't available on the Consumption plan, such as virtual network connectivity.
    ✔ You want to provide a custom Linux image on which to run your functions. | +|**[Premium plan]**|Automatically scales based on demand using pre-warmed workers, which run applications with no delay after being idle, runs on more powerful instances, and connects to virtual networks.

    Consider the Azure Functions Premium plan in the following situations:

    ✔ Your function apps run continuously, or nearly continuously.
    ✔ You have a high number of small executions and a high execution bill, but low GB seconds in the Consumption plan.
    ✔ You need more CPU or memory options than what is provided by the Consumption plan.
    ✔ Your code needs to run longer than the maximum execution time allowed on the Consumption plan.
    ✔ You require features that aren't available on the Consumption plan, such as virtual network connectivity.
    ✔ You want to provide a custom Linux image on which to run your functions. | |**[Dedicated plan]** |Run your functions within an App Service plan at regular [App Service plan rates](https://azure.microsoft.com/pricing/details/app-service/windows/).

    Best for long-running scenarios where [Durable Functions](durable/durable-functions-overview.md) can't be used. Consider an App Service plan in the following situations:

    ✔ You have existing, underutilized VMs that are already running other App Service instances.
    ✔ Predictive scaling and costs are required.| The comparison tables in this article also include the following hosting options, which provide the highest amount of control and isolation in which to run your function apps. @@ -74,7 +74,7 @@ Maximum instances are given on a per-function app (Consumption) or per-plan (Pre | **[ASE][Dedicated plan]**3 | Manual/autoscale |100 | | **[Kubernetes]** | Event-driven autoscale for Kubernetes clusters using [KEDA](https://keda.sh). | Varies by cluster  | -1 During scale out, there's currently a limit of 500 instances per subscription per hour for Linux apps on a Consumption plan.
    +1 During scale-out, there's currently a limit of 500 instances per subscription per hour for Linux apps on a Consumption plan.
    2 In some regions, Linux apps on a Premium plan can scale to 40 instances. For more information, see the [Premium plan article](functions-premium-plan.md#region-max-scale-out).
    3 For specific limits for the various App Service plan options, see the [App Service plan limits](../azure-resource-manager/management/azure-subscription-service-limits.md#app-service-limits). @@ -101,7 +101,7 @@ Maximum instances are given on a per-function app (Consumption) or per-plan (Pre | Plan | Details | | --- | --- | | **[Consumption plan]** | Pay only for the time your functions run. Billing is based on number of executions, execution time, and memory used. | -| **[Premium plan]** | Premium plan is based on the number of core seconds and memory used across needed and pre-warmed instances. At least one instance per plan must be kept warm at all times. This plan provides the most predictable pricing. | +| **[Premium plan]** | Premium plan is based on the number of core seconds and memory used across needed and pre-warmed instances. At least one instance per plan must always be kept warm. This plan provides the most predictable pricing. | | **[Dedicated plan]** | You pay the same for function apps in an App Service Plan as you would for other App Service resources, like web apps.| | **[App Service Environment (ASE)][Dedicated plan]** | There's a flat monthly rate for an ASE that pays for the infrastructure and doesn't change with the size of the ASE. There's also a cost per App Service plan vCPU. All apps hosted in an ASE are in the Isolated pricing SKU. | | **[Kubernetes]**| You pay only the costs of your Kubernetes cluster; no additional billing for Functions. Your function app runs as an application workload on top of your cluster, just like a regular app. 
| diff --git a/articles/azure-functions/functions-triggers-bindings.md b/articles/azure-functions/functions-triggers-bindings.md index 6122826bfee9..f3ca9ed11209 100644 --- a/articles/azure-functions/functions-triggers-bindings.md +++ b/articles/azure-functions/functions-triggers-bindings.md @@ -4,15 +4,16 @@ description: Learn to use triggers and bindings to connect your Azure Function t author: craigshoemaker ms.topic: conceptual -ms.date: 02/18/2019 +ms.date: 05/25/2022 ms.author: cshoe +ms.custom: devdivchpfy22 --- # Azure Functions triggers and bindings concepts -In this article you learn the high-level concepts surrounding functions triggers and bindings. +In this article, you learn the high-level concepts surrounding functions triggers and bindings. -Triggers are what cause a function to run. A trigger defines how a function is invoked and a function must have exactly one trigger. Triggers have associated data, which is often provided as the payload of the function. +Triggers cause a function to run. A trigger defines how a function is invoked and a function must have exactly one trigger. Triggers have associated data, which is often provided as the payload of the function. Binding to a function is a way of declaratively connecting another resource to the function; bindings may be connected as *input bindings*, *output bindings*, or both. Data from bindings is provided to the function as parameters. @@ -31,7 +32,7 @@ Consider the following examples of how you could implement different functions. \* Represents different queues -These examples are not meant to be exhaustive, but are provided to illustrate how you can use triggers and bindings together. +These examples aren't meant to be exhaustive, but are provided to illustrate how you can use triggers and bindings together. 
### Trigger and binding definitions diff --git a/articles/azure-functions/media/functions-az-redundancy/azure-functions-basics-az.png b/articles/azure-functions/media/functions-az-redundancy/azure-functions-basics-az.png new file mode 100644 index 000000000000..fed37f2d2caa Binary files /dev/null and b/articles/azure-functions/media/functions-az-redundancy/azure-functions-basics-az.png differ diff --git a/articles/azure-functions/media/functions-az-redundancy/azure-functions-hosting-az.png b/articles/azure-functions/media/functions-az-redundancy/azure-functions-hosting-az.png new file mode 100644 index 000000000000..e9fb9f91c659 Binary files /dev/null and b/articles/azure-functions/media/functions-az-redundancy/azure-functions-hosting-az.png differ diff --git a/articles/azure-monitor/alerts/alerts-log.md b/articles/azure-monitor/alerts/alerts-log.md index 933cc24094bc..c07a1248b579 100644 --- a/articles/azure-monitor/alerts/alerts-log.md +++ b/articles/azure-monitor/alerts/alerts-log.md @@ -12,10 +12,15 @@ ms.reviewer: yanivlavi This article shows you how to create log alert rules and manage your alert instances. Azure Monitor log alerts allow users to use a [Log Analytics](../logs/log-analytics-tutorial.md) query to evaluate resource logs at a set frequency and fire an alert based on the results. Rules can trigger one or more actions using [alert processing rules](alerts-action-rules.md) and [action groups](./action-groups.md). Learn the concepts behind log alerts [here](alerts-types.md#log-alerts). -When an alert is triggered by an alert rule, -- Target: A specific Azure resource to monitor. -- Criteria: Logic to evaluate. If met, the alert fires. -- Action: Notifications or automation - email, SMS, webhook, and so on. +You create an alert rule by combining: + - The resource(s) to be monitored. 
+ - The signal or telemetry from the resource + - Conditions + +And then defining these elements of the triggered alert: + - Alert processing rules + - Action groups + You can also [create log alert rules using Azure Resource Manager templates](../alerts/alerts-log-create-templates.md). ## Create a new log alert rule in the Azure portal @@ -56,7 +61,7 @@ You can also [create log alert rules using Azure Resource Manager templates](../ |Field |Description | |---------|---------| - |Dimension name|Dimensions can be either number or string columns. Dimensions are used to monitor specific time series and provide context to a fired alert.
    Splitting on the Azure Resource ID column makes the specified resource into the alert target. If an Resource ID column is detected, it is selected automatically and changes the context of the fired alert to the record's resource. | + |Dimension name|Dimensions can be either number or string columns. Dimensions are used to monitor specific time series and provide context to a fired alert.
    Splitting on the Azure Resource ID column makes the specified resource into the alert target. If a Resource ID column is detected, it is selected automatically and changes the context of the fired alert to the record's resource. | |Operator|The operator used on the dimension name and value. | |Dimension values|The dimension values are based on data from the last 48 hours. Select **Add custom value** to add custom dimension values. | @@ -113,10 +118,11 @@ You can also [create log alert rules using Azure Resource Manager templates](../ > [!NOTE] > This section above describes creating alert rules using the new alert rule wizard. > The new alert rule experience is a little different than the old experience. Please note these changes: -> - Previously, search results were included in the payloads of the triggered alert and its associated notifications. This was a limited and error prone solution. To get detailed context information about the alert so that you can decide on the appropriate action : -> - The recommended best practice it to use [Dimensions](alerts-unified-log.md#split-by-alert-dimensions). Dimensions provide the column value that fired the alert, giving you context for why the alert fired and how to fix the issue. -> - When you need to investigate in the logs, use the link in the alert to the search results in Logs. -> - If you need the raw search results or for any other advanced customizations, use Logic Apps. +> - Previously, search results were included in the payloads of the triggered alert and its associated notifications. This was a limited solution, since the email included only 10 rows from the unfiltered results while the webhook payload contained 1000 unfiltered results. +> To get detailed context information about the alert so that you can decide on the appropriate action : +> - We recommend using [Dimensions](alerts-unified-log.md#split-by-alert-dimensions). 
Dimensions provide the column value that fired the alert, giving you context for why the alert fired and how to fix the issue. +> - When you need to investigate in the logs, use the link in the alert to the search results in Logs. +> - If you need the raw search results or for any other advanced customizations, use Logic Apps. > - The new alert rule wizard does not support customization of the JSON payload. > - Use custom properties in the [new API](/rest/api/monitor/scheduledqueryrule-2021-08-01/scheduled-query-rules/create-or-update#actions) to add static parameters and associated values to the webhook actions triggered by the alert. > - For more advanced customizations, use Logic Apps. diff --git a/articles/azure-monitor/alerts/itsmc-connections.md b/articles/azure-monitor/alerts/itsmc-connections.md index 3e1df5cbe0b3..307bf01c741a 100644 --- a/articles/azure-monitor/alerts/itsmc-connections.md +++ b/articles/azure-monitor/alerts/itsmc-connections.md @@ -12,7 +12,7 @@ To set up your ITSM environment: 1. Connect to your ITSM. - For ServiceNow ITSM, see [the ServiceNow connection instructions](./itsmc-connections-servicenow.md). - - For SCSM, see [the System Center Service Manager connection instructions](./itsmc-connections-scsm.md). + - For SCSM, see [the System Center Service Manager connection instructions](/azure/azure-monitor/alerts/itsmc-connections). >[!NOTE] > As of March 1, 2022, System Center ITSM integrations with Azure alerts is no longer enabled for new customers. New System Center ITSM Connections are not supported. diff --git a/articles/azure-monitor/alerts/itsmc-definition.md b/articles/azure-monitor/alerts/itsmc-definition.md index 47261e926291..8adff9efbe36 100644 --- a/articles/azure-monitor/alerts/itsmc-definition.md +++ b/articles/azure-monitor/alerts/itsmc-definition.md @@ -60,7 +60,7 @@ After you've prepped your ITSM tool, complete these steps to create a connection 1. 
Specify the connection settings for the ITSM product that you're using: - [ServiceNow](./itsmc-connections-servicenow.md) - - [System Center Service Manager](./itsmc-connections-scsm.md) + - [System Center Service Manager](/azure/azure-monitor/alerts/itsmc-connections) > [!NOTE] > By default, ITSMC refreshes the connection's configuration data once every 24 hours. To refresh your connection's data instantly to reflect any edits or template updates that you make, select the **Sync** button on your connection's pane: diff --git a/articles/azure-monitor/app/powershell.md b/articles/azure-monitor/app/powershell.md index 10fcc4a82045..5310166bf345 100644 --- a/articles/azure-monitor/app/powershell.md +++ b/articles/azure-monitor/app/powershell.md @@ -157,7 +157,8 @@ Create a new .json file - let's call it `template1.json` in this example. Copy t "tags": {}, "properties": { "ApplicationId": "[parameters('appName')]", - "retentionInDays": "[parameters('retentionInDays')]" + "retentionInDays": "[parameters('retentionInDays')]", + "ImmediatePurgeDataOn30Days": "[parameters('ImmediatePurgeDataOn30Days')]" }, "dependsOn": [] }, diff --git a/articles/azure-monitor/app/sampling.md b/articles/azure-monitor/app/sampling.md index c16843621236..76448f9ad905 100644 --- a/articles/azure-monitor/app/sampling.md +++ b/articles/azure-monitor/app/sampling.md @@ -104,11 +104,11 @@ In [`ApplicationInsights.config`](./configuration-with-applicationinsights-confi The amount of telemetry to sample when the app has just started. Don't reduce this value while you're debugging. -* `Trace;Exception` +* `type;type` A semi-colon delimited list of types that you do not want to be subject to sampling. Recognized types are: `Dependency`, `Event`, `Exception`, `PageView`, `Request`, `Trace`. All telemetry of the specified types is transmitted; the types that are not specified will be sampled. 
-* `Request;Dependency` +* `type;type` A semi-colon delimited list of types that you do want to subject to sampling. Recognized types are: `Dependency`, `Event`, `Exception`, `PageView`, `Request`, `Trace`. The specified types will be sampled; all telemetry of the other types will always be transmitted. diff --git a/articles/azure-monitor/insights/data-explorer.md b/articles/azure-monitor/insights/data-explorer.md deleted file mode 100644 index 63a41adbdcaf..000000000000 --- a/articles/azure-monitor/insights/data-explorer.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Azure Data Explorer Insights| Microsoft Docs -description: This article describes how to use Azure Data Explorer Insights. -services: azure-monitor -ms.topic: conceptual -ms.date: 01/05/2021 - ---- - -# Azure Data Explorer Insights - -Azure Data Explorer Insights provides comprehensive monitoring of your clusters by delivering a unified view of your cluster performance, operations, usage, and failures. - -It offers: - -- **At-scale perspective**. A snapshot view of your clusters' primary metrics helps you track performance of queries, ingestion, and export operations. -- **Drill-down analysis**. You can drill down into a particular Azure Data Explorer cluster to perform detailed analysis. -- **Customization**. You can change which metrics you want to see, modify or set thresholds that align with your limits, and save your own custom workbooks. Charts in a workbook can be pinned to Azure dashboards. - -This article will help you understand how to onboard and use Azure Data Explorer Insights. - -## View from Azure Monitor (at-scale perspective) - -From Azure Monitor, you can view the main performance metrics for the cluster. These metrics include information about queries, ingestion, and export operations from multiple clusters in your subscription. They can help you identify performance problems. - -To view the performance of your clusters across all your subscriptions: - -1. 
Sign in to the [Azure portal](https://portal.azure.com/). - -2. Select **Monitor** from the left pane. In the **Insights Hub** section, select **Azure Data Explorer Clusters**. - -![Screenshot of selections for viewing the performance of Azure Data Explorer clusters.](./media/data-explorer/insights-hub.png) - -### Overview tab - -On the **Overview** tab for the selected subscription, the table displays interactive metrics for the Azure Data Explorer clusters grouped within the subscription. You can filter results based on the options that you select from the following dropdown lists: - -* **Subscriptions**: Only subscriptions that have Azure Data Explorer clusters are listed. - -* **Azure Data Explorer clusters**: By default, up to five clusters are pre-selected. If you select all or multiple clusters in the scope selector, up to 200 clusters will be returned. - -* **Time Range**: By default, the table displays the last 24 hours of information based on the corresponding selections made. - -The counter tile, under the dropdown list, gives the total number of Azure Data Explorer clusters in the selected subscriptions and shows how many are selected. There are conditional color codings for the columns: **Keep alive**, **CPU**, **Ingestion Utilization**, and **Cache Utilization**. Orange-coded cells have values that are not sustainable for the cluster. - -To better understand what each of the metrics represent, we recommend reading through the documentation on [Azure Data Explorer metrics](/azure/data-explorer/using-metrics#cluster-metrics). - -### Query Performance tab - -The **Query Performance** tab shows the query duration, the total number of concurrent queries, and the total number of throttled queries. 
- -![Screenshot of the Query Performance tab.](./media/data-explorer/query-performance.png) - -### Ingestion Performance tab - -The **Ingestion Performance** tab shows the ingestion latency, succeeded ingestion results, failed ingestion results, ingestion volume, and events processed for event hubs and IoT hubs. - -[![Screenshot of the Ingestion Performance tab.](./media/data-explorer/ingestion-performance.png)](./media/data-explorer/ingestion-performance.png#lightbox) - -### Streaming Ingest Performance tab - -The **Streaming Ingest Performance** tab provides information on the average data rate, average duration, and request rate. - -### Export Performance tab - -The **Export Performance** tab provides information on exported records, lateness, pending count, and utilization percentage for continuous export operations. - -## View from an Azure Data Explorer Cluster resource (drill-down analysis) - -To access Azure Data Explorer Insights directly from an Azure Data Explorer cluster: - -1. In the Azure portal, select **Azure Data Explorer Clusters**. - -2. From the list, choose an Azure Data Explorer cluster. In the monitoring section, select **Insights**. - -You can also access these views by selecting the resource name of an Azure Data Explorer cluster from within the Azure Monitor insights view. - -> [!NOTE] -> Azure Data Explorer Insights combines both logs and metrics to provide a global monitoring solution. The inclusion of logs-based visualizations requires users to [enable diagnostic logging of their Azure Data Explorer cluster and send them to a Log Analytics workspace](/azure/data-explorer/using-diagnostic-logs?tabs=commands-and-queries#enable-diagnostic-logs). The diagnostic logs that should be enabled are **Command**, **Query**, **SucceededIngestion**, **FailedIngestion**, **IngestionBatching**, **TableDetails**, and **TableUsageStatistics**. (Enabling **SucceededIngestion** logs might be costly. 
Enable them only if you need to monitor successful ingestions.) - -![Screenshot of the button for configuring logs for monitoring.](./media/data-explorer/enable-logs.png) - -### Overview tab - -The **Overview** tab shows: - -- Metrics tiles that highlight the availability and overall status of the cluster for quick health assessment. - -- A summary of active [Azure Advisor recommendations](/azure/data-explorer/azure-advisor) and [resource health](/azure/data-explorer/monitor-with-resource-health) status. - -- Charts that show the top CPU and memory consumers and the number of unique users over time. - -[![Screenshot of the view from an Azure Data Explorer cluster resource.](./media/data-explorer/overview.png)](./media/data-explorer/overview.png#lightbox) - -### Key Metrics tab - -The **Key Metrics** tab shows a unified view of some of the cluster's metrics. They're grouped into general metrics, query-related metrics, ingestion-related metrics, and streaming ingestion-related metrics. - -[![Screenshot of graphs on the Key Metrics tab.](./media/data-explorer/key-metrics.png)](./media/data-explorer/key-metrics.png#lightbox) - -### Usage tab - -The **Usage** tab allows users to deep dive into the performance of the cluster's commands and queries. On this tab, you can: - -- See which workload groups, users, and applications are sending the most queries or consuming the most CPU and memory. You can then understand which workloads are submitting the heaviest queries for the cluster to process. -- Identify top workload groups, users, and applications by failed queries. -- Identify recent changes in the number of queries, compared to the historical daily average (over the past 16 days), by workload group, user, and application. -- Identify trends and peaks in the number of queries, memory, and CPU consumption by workload group, user, application, and command type. - -The **Usage** tab includes actions that are performed directly by users. 
Internal cluster operations are not included in this tab. - -[![Screenshot of the operations view with donut charts related to commands and queries.](./media/data-explorer/usage.png)](./media/data-explorer/usage.png#lightbox) - -[![Screenshot of the operations view with line charts related to queries and memory.](./media/data-explorer/usage-2.png)](./media/data-explorer/usage-2.png#lightbox) - -### Tables tab - -The **Tables** tab shows the latest and historical properties of tables in the cluster. You can see which tables are consuming the most space. You can also track growth history by table size, hot data, and the number of rows over time. - -### Cache tab - -The **Cache** tab allows users to analyze their actual queries' lookback window patterns and compare them to the configured cache policy (for each table). You can identify tables used by the most queries and tables that are not queried at all, and adapt the cache policy accordingly. - -You might get cache policy recommendations on specific tables in Azure Advisor. Currently, cache recommendations are available only from the [main Azure Advisor dashboard](/azure/data-explorer/azure-advisor#use-the-azure-advisor-recommendations). They're based on actual queries' lookback window in the past 30 days and an unoptimized cache policy for at least 95 percent of the queries. - -Cache reduction recommendations in Azure Advisor are available for clusters that are "bounded by data." That means the cluster has low CPU and low ingestion utilization, but because of high data capacity, the cluster can't scale in or scale down. - -[![Screenshot of cache details.](./media/data-explorer/cache-tab.png)](./media/data-explorer/cache-tab.png#lightbox) - -### Cluster Boundaries tab - -The **Cluster Boundaries** tab displays the cluster boundaries based on your usage. On this tab, you can inspect the CPU, ingestion, and cache utilization. These metrics are scored as **Low**, **Medium**, or **High**. 
These metrics and scores are important when you're deciding on the optimal SKU and instance count for your cluster. They're taken into account in Azure Advisor SKU/size recommendations. - -On this tab, you can select a metric tile and deep dive to understand its trend and how its score is decided. You can also view the Azure Advisor SKU/size recommendation for your cluster. For example, in the following image, you can see that all metrics are scored as **Low**. The cluster receives a cost recommendation that will allow it to scale in/down and save cost. - -> [!div class="mx-imgBorder"] -> [![Screenshot of cluster boundaries.](./media/data-explorer/cluster-boundaries.png)](./media/data-explorer/cluster-boundaries.png#lightbox) - -## Pin to an Azure dashboard - -You can pin any one of the metric sections (of the "at-scale" perspective) to an Azure dashboard by selecting the pushpin icon at the upper right of the section. - -![Screenshot of the pin icon selected.](./media/data-explorer/pin.png) - -## Customize Azure Data Explorer Insights - -You can edit the workbook to customize it in support of your data analytics needs: -* Scope the workbook to always select a particular subscription or Azure Data Explorer clusters. -* Change metrics in the grid. -* Change thresholds or color rendering/coding. - -You can begin customizations by selecting the **Customize** button on the top toolbar. - -![Screenshot of the Customize button.](./media/data-explorer/customize.png) - -Customizations are saved to a custom workbook to prevent overwriting the default configuration in a published workbook. Workbooks are saved within a resource group, either in the **My Reports** section that's private to you or in the **Shared Reports** section that's accessible to everyone with access to the resource group. After you save the custom workbook, go to the workbook gallery to open it. 
- -![Screenshot of the workbook gallery.](./media/data-explorer/gallery.png) - -## Troubleshooting - -For general troubleshooting guidance, see the [Troubleshooting workbook-based insights](troubleshoot-workbooks.md) article. - -The following sections will help you diagnose and troubleshoot of some of the common problems that you might encounter when using Azure Data Explorer Insights. - -### Why don't I see all my subscriptions in the subscription picker? - -Azure Data Explorer Insights shows only subscriptions that contain Azure Data Explorer clusters chosen from the selected subscription filter. You select a subscription filter under **Directory + subscription** in the Azure portal. - -![Screenshot of selecting a subscription filter.](./media/key-vaults-insights-overview/Subscriptions.png) - -### Why don't I see any data for my Azure Data Explorer cluster under the Usage, Tables, or Cache section? - -To view your logs-based data, you need to [enable diagnostic logs](/azure/data-explorer/using-diagnostic-logs?tabs=commands-and-queries#enable-diagnostic-logs) for each Azure Data Explorer cluster that you want to monitor. You can do this under the diagnostic settings for each cluster. You'll need to send your data to a Log Analytics workspace. The diagnostic logs that should be enabled are **Command**, **Query**, **TableDetails**, and **TableUsageStatistics**. - -### I've already enabled logs for my Azure Data Explorer cluster. Why am I still unable to see my data under Commands and Queries? - -Currently, diagnostic logs don't work retroactively. The data will start appearing after actions have been taken in Azure Data Explorer. It might take some time, ranging from hours to a day, depending on how active your Azure Data Explorer cluster is. 
- -## Next steps - -Learn the scenarios that workbooks are designed to support, how to author new and customize existing reports, and more by reviewing [Create interactive reports with Azure Monitor workbooks](../visualize/workbooks-overview.md). diff --git a/articles/azure-monitor/insights/media/data-explorer/cache-tab.png b/articles/azure-monitor/insights/media/data-explorer/cache-tab.png deleted file mode 100644 index 7cb896d07be1..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/cache-tab.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/cluster-boundaries.png b/articles/azure-monitor/insights/media/data-explorer/cluster-boundaries.png deleted file mode 100644 index e931424c9d19..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/cluster-boundaries.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/customize.png b/articles/azure-monitor/insights/media/data-explorer/customize.png deleted file mode 100644 index c4f22350ee0c..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/customize.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/enable-logs.png b/articles/azure-monitor/insights/media/data-explorer/enable-logs.png deleted file mode 100644 index 91963467abde..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/enable-logs.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/gallery.png b/articles/azure-monitor/insights/media/data-explorer/gallery.png deleted file mode 100644 index 97001779ba81..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/gallery.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/ingestion-performance.png b/articles/azure-monitor/insights/media/data-explorer/ingestion-performance.png deleted file mode 
100644 index 74e21901ec75..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/ingestion-performance.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/insights-hub.png b/articles/azure-monitor/insights/media/data-explorer/insights-hub.png deleted file mode 100644 index 0168f88cf521..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/insights-hub.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/key-metrics.png b/articles/azure-monitor/insights/media/data-explorer/key-metrics.png deleted file mode 100644 index a79966ce1fd2..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/key-metrics.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/overview.png b/articles/azure-monitor/insights/media/data-explorer/overview.png deleted file mode 100644 index 43514597b018..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/overview.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/pin.png b/articles/azure-monitor/insights/media/data-explorer/pin.png deleted file mode 100644 index 72b4d8821b3e..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/pin.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/query-performance.png b/articles/azure-monitor/insights/media/data-explorer/query-performance.png deleted file mode 100644 index 7b5dad7b06c1..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/query-performance.png and /dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/usage-2.png b/articles/azure-monitor/insights/media/data-explorer/usage-2.png deleted file mode 100644 index 553d209aade9..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/usage-2.png and 
/dev/null differ diff --git a/articles/azure-monitor/insights/media/data-explorer/usage.png b/articles/azure-monitor/insights/media/data-explorer/usage.png deleted file mode 100644 index 7fcb3dc98622..000000000000 Binary files a/articles/azure-monitor/insights/media/data-explorer/usage.png and /dev/null differ diff --git a/articles/azure-monitor/logs/basic-logs-configure.md b/articles/azure-monitor/logs/basic-logs-configure.md index eb8e96377377..07f2c15680ea 100644 --- a/articles/azure-monitor/logs/basic-logs-configure.md +++ b/articles/azure-monitor/logs/basic-logs-configure.md @@ -27,6 +27,27 @@ You can currently configure the following tables for Basic Logs: ## Set table configuration + +# [Portal](#tab/portal-1) + +To configure a table for Basic Logs or Analytics Logs in the Azure portal: + +1. From the **Log Analytics workspaces** menu, select **Tables (preview)**. + + The **Tables (preview)** screen lists all of the tables in the workspace. + +1. Select the context menu for the table you want to configure and select **Manage table**. + + :::image type="content" source="media/basic-logs-configure/log-analytics-table-configuration.png" lightbox="media/basic-logs-configure/log-analytics-table-configuration.png" alt-text="Screenshot showing the Manage table button for one of the tables in a workspace."::: + +1. From the **Table plan** dropdown on the table configuration screen, select **Basic** or **Analytics**. + + The **Table plan** dropdown is enabled only for [tables that support Basic Logs](#which-tables-support-basic-logs). + + :::image type="content" source="media/basic-logs-configure/log-analytics-configure-table-plan.png" lightbox="media/basic-logs-configure/log-analytics-configure-table-plan.png" alt-text="Screenshot showing the Table plan dropdown on the table configuration screen."::: + +1. Select **Save**. 
+ # [API](#tab/api-1) To configure a table for Basic Logs or Analytics Logs, call the **Tables - Update** API: @@ -117,9 +138,11 @@ For example: --- ## Check table configuration -# [Portal](#tab/portal-1) +# [Portal](#tab/portal-2) + +To check table configuration in the Azure portal, you can open the table configuration screen, as described in [Set table configuration](#set-table-configuration). -To check the configuration of a table in the Azure portal: +Alternatively: 1. From the **Azure Monitor** menu, select **Logs** and select your workspace for the [scope](scope.md). See [Log Analytics tutorial](log-analytics-tutorial.md#view-table-information) for a walkthrough. 1. Open the **Tables** tab, which lists all tables in the workspace. @@ -128,7 +151,7 @@ To check the configuration of a table in the Azure portal: ![Screenshot of the Basic Logs table icon in the table list.](./media/basic-logs-configure/table-icon.png#lightbox) - You can also hover over a table name for the table information view. 
This will specify that the table is configured as Basic Logs: + You can also hover over a table name for the table information view, which indicates whether the table is configured as Basic Logs: ![Screenshot of the Basic Logs table indicator in the table details.](./media/basic-logs-configure/table-info.png#lightbox) diff --git a/articles/azure-monitor/logs/cost-logs.md b/articles/azure-monitor/logs/cost-logs.md index 8ae76080e310..c3f4b813ed92 100644 --- a/articles/azure-monitor/logs/cost-logs.md +++ b/articles/azure-monitor/logs/cost-logs.md @@ -115,7 +115,6 @@ When Microsoft Sentinel is enabled in a Log Analytics workspace, all data collec - [SecurityDetection](/azure/azure-monitor/reference/tables/securitydetection) - [SecurityEvent](/azure/azure-monitor/reference/tables/securityevent) - [WindowsFirewall](/azure/azure-monitor/reference/tables/windowsfirewall) -- [MaliciousIPCommunication](/azure/azure-monitor/reference/tables/maliciousipcommunication) - [LinuxAuditLog](/azure/azure-monitor/reference/tables/linuxauditlog) - [SysmonEvent](/azure/azure-monitor/reference/tables/sysmonevent) - [ProtectionStatus](/azure/azure-monitor/reference/tables/protectionstatus) diff --git a/articles/azure-monitor/logs/customer-managed-keys.md b/articles/azure-monitor/logs/customer-managed-keys.md index f80af1a5810c..73d9ba0ba933 100644 --- a/articles/azure-monitor/logs/customer-managed-keys.md +++ b/articles/azure-monitor/logs/customer-managed-keys.md @@ -254,10 +254,9 @@ When link your own storage (BYOS) to workspace, the service stores *saved-search * You need to have "write" permissions on your workspace and Storage Account. * Make sure to create your Storage Account in the same region as your Log Analytics workspace is located. * The *saves searches* in storage is considered as service artifacts and their format may change. -* Existing *saves searches* are removed from your workspace. Copy and any *saves searches* that you need before the configuration. 
You can view your *saved-searches* using [PowerShell](/powershell/module/az.operationalinsights/get-azoperationalinsightssavedsearch). -* Query history isn't supported and you won't be able to see queries that you ran. +* Existing *saved searches* are removed from your workspace. Copy any *saved searches* that you need before this configuration. You can view your *saved-searches* using [PowerShell](/powershell/module/az.operationalinsights/get-azoperationalinsightssavedsearch). +* Query 'history' and 'pin to dashboard' aren't supported when linking Storage Account for queries. * You can link a single Storage Account to a workspace, which can be used for both *saved-searches* and *log alerts* queries. -* Pin to dashboard isn't supported. * Fired log alerts will not contain search results or alert query. You can use [alert dimensions](../alerts/alerts-unified-log.md#split-by-alert-dimensions) to get context in the fired alerts. **Configure BYOS for saved-searches queries** @@ -380,21 +379,15 @@ Customer-Managed key is provided on dedicated cluster and these operations are r ## Limitations and constraints -- The max number of cluster per region and subscription is two. +- A maximum of five active clusters can be created in each region and subscription. -- The maximum number of workspaces that can be linked to a cluster is 1000. +- A maximum of seven reserved clusters (active or recently deleted) can exist in each region and subscription. -- You can link a workspace to your cluster and then unlink it. The number of workspace link operations on particular workspace is limited to two in a period of 30 days. +- A maximum of 1,000 Log Analytics workspaces can be linked to a cluster. -- Customer-managed key encryption applies to newly ingested data after the configuration time. Data that was ingested prior to the configuration, remains encrypted with Microsoft key. You can query data ingested before and after the Customer-managed key configuration seamlessly. 
- -- The Azure Key Vault must be configured as recoverable. These properties aren't enabled by default and should be configured using CLI or PowerShell:
    - - [Soft Delete](../../key-vault/general/soft-delete-overview.md). - - [Purge protection](../../key-vault/general/soft-delete-overview.md#purge-protection) should be turned on to guard against force deletion of the secret, vault even after soft delete. - -- Cluster move to another resource group or subscription isn't supported currently. +- A maximum of two workspace link operations on a particular workspace is allowed in a 30-day period. -- Your Azure Key Vault, cluster and workspaces must be in the same region and in the same Azure Active Directory (Azure AD) tenant, but they can be in different subscriptions. +- Moving a cluster to another resource group or subscription isn't currently supported. - Cluster update should not include both identity and key identifier details in the same operation. In case you need to update both, the update should be in two consecutive operations. @@ -404,9 +397,19 @@ Customer-Managed key is provided on dedicated cluster and these operations are r - If you create a cluster and get an error—"region-name doesn’t support Double Encryption for clusters", you can still create the cluster without Double encryption, by adding `"properties": {"isDoubleEncryptionEnabled": false}` in the REST request body. - Double encryption setting can not be changed after the cluster has been created. - - Setting the cluster's `identity` `type` to `None` also revokes access to your data, but this approach isn't recommended since you can't revert it without contacting support. The recommended way to revoke access to your data is [key revocation](#key-revocation). +- Deleting a linked workspace is permitted while linked to a cluster. If you decide to [recover](./delete-workspace.md#recover-workspace) the workspace during the [soft-delete](./delete-workspace.md#soft-delete-behavior) period, it returns to its previous state and remains linked to the cluster. + +- Customer-managed key encryption applies to newly ingested data after the configuration time. 
Data that was ingested prior to the configuration remains encrypted with a Microsoft key. You can query data ingested before and after the Customer-managed key configuration seamlessly. + +- The Azure Key Vault must be configured as recoverable. These properties aren't enabled by default and should be configured using CLI or PowerShell:
    + - [Soft Delete](../../key-vault/general/soft-delete-overview.md). + - [Purge protection](../../key-vault/general/soft-delete-overview.md#purge-protection) should be turned on to guard against force deletion of the secret, vault even after soft delete. + +- Your Azure Key Vault, cluster and workspaces must be in the same region and in the same Azure Active Directory (Azure AD) tenant, but they can be in different subscriptions. + +- Setting the cluster's `identity` `type` to `None` also revokes access to your data, but this approach isn't recommended since you can't revert it without contacting support. The recommended way to revoke access to your data is [key revocation](#key-revocation). - - You can't use Customer-managed key with User-assigned managed identity if your Key Vault is in Private-Link (vNet). You can use System-assigned managed identity in this scenario. +- You can't use Customer-managed key with User-assigned managed identity if your Key Vault is in Private-Link (vNet). You can use System-assigned managed identity in this scenario. ## Troubleshooting diff --git a/articles/azure-monitor/logs/data-retention-archive.md b/articles/azure-monitor/logs/data-retention-archive.md index 0a4774cc776a..b16534a34181 100644 --- a/articles/azure-monitor/logs/data-retention-archive.md +++ b/articles/azure-monitor/logs/data-retention-archive.md @@ -23,7 +23,7 @@ During the interactive retention period, data is available for monitoring, troub > The archive feature is currently in public preview and can only be set at the table level, not at the workspace level. ## Configure the default workspace retention policy -You can set the workspace default retention policy in the Azure portal to 30, 31, 60, 90, 120, 180, 270, 365, 550, and 730 days. To set a different policy, use the Resource Manager configuration method described below. If you're on the *free* tier, you need to upgrade to the paid tier to change the data retention period. 
+You can set the workspace default retention policy in the Azure portal to 30, 31, 60, 90, 120, 180, 270, 365, 550, and 730 days. You can set a different policy for specific tables by [configuring retention and archive policy at the table level](#set-retention-and-archive-policy-by-table). If you're on the *free* tier, you'll need to upgrade to the paid tier to change the data retention period. To set the default workspace retention policy: @@ -37,17 +37,25 @@ To set the default workspace retention policy: ## Set retention and archive policy by table -You can set retention policies for individual tables, except for workspaces in the legacy Free Trial pricing tier, using Azure Resource Manager APIs. You can’t currently configure data retention for individual tables in the Azure portal. +By default, all tables in your workspace inherit the workspace's interactive retention setting and have no archive policy. You can modify the retention and archive policies of individual tables, except for workspaces in the legacy Free Trial pricing tier. You can keep data in interactive retention between 4 and 730 days. You can set the archive period for a total retention time of up to 2,555 days (seven years). -Each table is a subresource of the workspace it's in. For example, you can address the `SecurityEvent` table in [Azure Resource Manager](../../azure-resource-manager/management/overview.md) as: +# [Portal](#tab/portal-1) -``` -/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/MyResourceGroupName/providers/Microsoft.OperationalInsights/workspaces/MyWorkspaceName/Tables/SecurityEvent -``` +To set the retention and archive duration for a table in the Azure portal: + +1. From the **Log Analytics workspaces** menu, select **Tables (preview)**. + + The **Tables (preview)** screen lists all of the tables in the workspace. + +1. Select the context menu for the table you want to configure and select **Manage table**. 
+ + :::image type="content" source="media/basic-logs-configure/log-analytics-table-configuration.png" lightbox="media/basic-logs-configure/log-analytics-table-configuration.png" alt-text="Screenshot showing the Manage table button for one of the tables in a workspace."::: -The table name is case-sensitive. +1. Configure the retention and archive duration in **Data retention settings** section of the table configuration screen. + + :::image type="content" source="media/data-retention-configure/log-analytics-configure-table-retention-archive.png" lightbox="media/data-retention-configure/log-analytics-configure-table-retention-archive.png" alt-text="Screenshot showing the data retention settings on the table configuration screen."::: # [API](#tab/api-1) @@ -58,7 +66,7 @@ PATCH https://management.azure.com/subscriptions/{subscriptionId}/resourcegroups ``` > [!NOTE] -> You don't explicitly specify the archive duration in the API call. Instead, you set the total retention, which specifies the retention plus the archive duration. +> You don't explicitly specify the archive duration in the API call. Instead, you set the total retention, which is the sum of the interactive retention plus the archive duration. You can use either PUT or PATCH, with the following difference: @@ -133,6 +141,15 @@ az monitor log-analytics workspace table update --subscription ContosoSID --reso ## Get retention and archive policy by table +# [Portal](#tab/portal-2) + +To view the retention and archive duration for a table in the Azure portal, from the **Log Analytics workspaces** menu, select **Tables (preview)**. + +The **Tables (preview)** screen shows the interactive retention and archive period for all of the tables in the workspace. 
+ +:::image type="content" source="media/data-retention-configure/log-analytics-view-table-retention-archive.png" lightbox="media/data-retention-configure/log-analytics-view-table-retention-archive.png" alt-text="Screenshot showing the interactive retention and archive period for the tables in a workspace."::: + + # [API](#tab/api-2) To get the retention policy of a particular table (in this example, `SecurityEvent`), call the **Tables - Get** API: diff --git a/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-configure-table-plan.png b/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-configure-table-plan.png new file mode 100644 index 000000000000..ce52c164a24f Binary files /dev/null and b/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-configure-table-plan.png differ diff --git a/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-table-configuration.png b/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-table-configuration.png new file mode 100644 index 000000000000..aeab8ac15f7d Binary files /dev/null and b/articles/azure-monitor/logs/media/basic-logs-configure/log-analytics-table-configuration.png differ diff --git a/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-configure-table-retention-archive.png b/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-configure-table-retention-archive.png new file mode 100644 index 000000000000..c910cedd170e Binary files /dev/null and b/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-configure-table-retention-archive.png differ diff --git a/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-view-table-retention-archive.png b/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-view-table-retention-archive.png new file mode 100644 index 000000000000..2b2f60108f2c Binary files /dev/null and 
b/articles/azure-monitor/logs/media/data-retention-configure/log-analytics-view-table-retention-archive.png differ diff --git a/articles/azure-monitor/logs/tutorial-custom-logs.md b/articles/azure-monitor/logs/tutorial-custom-logs.md index 3760defb4a42..8bbee1b5c903 100644 --- a/articles/azure-monitor/logs/tutorial-custom-logs.md +++ b/articles/azure-monitor/logs/tutorial-custom-logs.md @@ -253,7 +253,7 @@ Instead of directly configuring the schema of the table, the portal allows you t ```kusto source | extend TimeGenerated = todatetime(Time) - | parse RawData.value with + | parse RawData with ClientIP:string ' ' * ' ' * diff --git a/articles/azure-monitor/monitor-reference.md b/articles/azure-monitor/monitor-reference.md index b4325c5988b1..1bb050c36c5f 100644 --- a/articles/azure-monitor/monitor-reference.md +++ b/articles/azure-monitor/monitor-reference.md @@ -30,7 +30,7 @@ The table below lists the available curated visualizations and more detailed inf | [Azure Monitor for Azure Cache for Redis (preview)](./insights/redis-cache-insights-overview.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/redisCacheInsights) | Provides a unified, interactive view of overall performance, failures, capacity, and operational health | | [Azure Cosmos DB Insights](./insights/cosmosdb-insights-overview.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/cosmosDBInsights) | Provides a view of the overall performance, failures, capacity, and operational health of all your Azure Cosmos DB resources in a unified interactive experience. | | [Azure Container Insights](/azure/azure-monitor/insights/container-insights-overview) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/containerInsights) | Monitors the performance of container workloads that are deployed to managed Kubernetes clusters hosted on Azure Kubernetes Service (AKS). 
It gives you performance visibility by collecting metrics from controllers, nodes, and containers that are available in Kubernetes through the Metrics API. Container logs are also collected. After you enable monitoring from Kubernetes clusters, these metrics and logs are automatically collected for you through a containerized version of the Log Analytics agent for Linux. | -| [Azure Data Explorer insights](./insights/data-explorer.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/adxClusterInsights) | Azure Data Explorer Insights provides comprehensive monitoring of your clusters by delivering a unified view of your cluster performance, operations, usage, and failures. | +| [Azure Data Explorer insights](/azure/data-explorer/data-explorer-insights) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/adxClusterInsights) | Azure Data Explorer Insights provides comprehensive monitoring of your clusters by delivering a unified view of your cluster performance, operations, usage, and failures. | | [Azure HDInsight (preview)](../hdinsight/log-analytics-migration.md#insights) | Preview | No | An Azure Monitor workbook that collects important performance metrics from your HDInsight cluster and provides the visualizations and dashboards for most common scenarios. Gives a complete view of a single HDInsight cluster including resource utilization and application status| | [Azure IoT Edge](../iot-edge/how-to-explore-curated-visualizations.md) | GA | No | Visualize and explore metrics collected from the IoT Edge device right in the Azure portal using Azure Monitor Workbooks based public templates. The curated workbooks use built-in metrics from the IoT Edge runtime. These views don't need any metrics instrumentation from the workload modules. 
| | [Azure Key Vault Insights (preview)](./insights/key-vault-insights-overview.md) | GA | [Yes](https://portal.azure.com/#blade/Microsoft_Azure_Monitoring/AzureMonitoringBrowseBlade/keyvaultsInsights) | Provides comprehensive monitoring of your key vaults by delivering a unified view of your Key Vault requests, performance, failures, and latency. | diff --git a/articles/azure-monitor/toc.yml b/articles/azure-monitor/toc.yml index 0c1b0b4e142d..3404be052ec2 100644 --- a/articles/azure-monitor/toc.yml +++ b/articles/azure-monitor/toc.yml @@ -1339,7 +1339,7 @@ items: - name: Azure Cosmos DB href: insights/cosmosdb-insights-overview.md - name: Azure Data Explorer - href: insights/data-explorer.md + href: /azure/data-explorer/data-explorer-insights - name: Log Analytics workspace href: logs/log-analytics-workspace-insights-overview.md - name: Networks diff --git a/articles/azure-monitor/vm/monitor-virtual-machine-alerts.md b/articles/azure-monitor/vm/monitor-virtual-machine-alerts.md index 36f287f49bd5..9af9345af42a 100644 --- a/articles/azure-monitor/vm/monitor-virtual-machine-alerts.md +++ b/articles/azure-monitor/vm/monitor-virtual-machine-alerts.md @@ -83,7 +83,7 @@ Use a rule with the following query. ```kusto Heartbeat -| summarize TimeGenerated=max(TimeGenerated) by Computer +| summarize TimeGenerated=max(TimeGenerated) by Computer, _ResourceId | extend Duration = datetime_diff('minute',now(),TimeGenerated) | summarize AggregatedValue = min(Duration) by Computer, bin(TimeGenerated,5m), _ResourceId ``` diff --git a/articles/azure-monitor/whats-new.md b/articles/azure-monitor/whats-new.md index 50ce16a09bd6..e39788cbc8b1 100644 --- a/articles/azure-monitor/whats-new.md +++ b/articles/azure-monitor/whats-new.md @@ -510,7 +510,7 @@ This article lists significant changes to Azure Monitor documentation. 
**Updated articles** -- [Azure Data Explorer Insights](insights/data-explorer.md) +- [Azure Data Explorer Insights](/azure/data-explorer/data-explorer-insights) - [Agent Health solution in Azure Monitor](insights/solution-agenthealth.md) - [Monitoring solutions in Azure Monitor](insights/solutions.md) - [Monitor your SQL deployments with SQL Insights (preview)](insights/sql-insights-overview.md) diff --git a/articles/azure-netapp-files/configure-ldap-extended-groups.md b/articles/azure-netapp-files/configure-ldap-extended-groups.md index 741da3e818bd..497299426d11 100644 --- a/articles/azure-netapp-files/configure-ldap-extended-groups.md +++ b/articles/azure-netapp-files/configure-ldap-extended-groups.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: how-to -ms.date: 03/15/2022 +ms.date: 05/27/2022 ms.author: anfdocs --- # Enable Active Directory Domain Services (ADDS) LDAP authentication for NFS volumes @@ -24,7 +24,7 @@ Azure NetApp Files supports fetching of extended groups from the LDAP name servi When it’s determined that LDAP will be used for operations such as name lookup and fetching extended groups, the following process occurs: 1. Azure NetApp Files uses an LDAP client configuration to make a connection attempt to the ADDS/AADDS LDAP server that is specified in the [Azure NetApp Files AD configuration](create-active-directory-connections.md). -1. If the TCP connection over the defined ADDS/AADDS LDAP service port is successful, then the Azure NetApp Files LDAP client attempts to “bind” (log in) to the ADDS/AADDS LDAP server (domain controller) by using the defined credentials in the LDAP client configuration. +1. If the TCP connection over the defined ADDS/AADDS LDAP service port is successful, then the Azure NetApp Files LDAP client attempts to “bind” (sign in) to the ADDS/AADDS LDAP server (domain controller) by using the defined credentials in the LDAP client configuration. 1. 
If the bind is successful, then the Azure NetApp Files LDAP client uses the RFC 2307bis LDAP schema to make an LDAP search query to the ADDS/AADDS LDAP server (domain controller). The following information is passed to the server in the query: * [Base/user DN](configure-ldap-extended-groups.md#ldap-search-scope) (to narrow search scope) @@ -98,7 +98,7 @@ The following information is passed to the server in the query: ![Screenshot that shows Create a Volume page with LDAP option.](../media/azure-netapp-files/create-nfs-ldap.png) 7. Optional - You can enable local NFS client users not present on the Windows LDAP server to access an NFS volume that has LDAP with extended groups enabled. To do so, enable the **Allow local NFS users with LDAP** option as follows: - 1. Click **Active Directory connections**. On an existing Active Directory connection, click the context menu (the three dots `…`), and select **Edit**. + 1. Select **Active Directory connections**. On an existing Active Directory connection, select the context menu (the three dots `…`), and select **Edit**. 2. On the **Edit Active Directory settings** window that appears, select the **Allow local NFS users with LDAP** option. 
![Screenshot that shows the Allow local NFS users with LDAP option](../media/azure-netapp-files/allow-local-nfs-users-with-ldap.png) @@ -119,5 +119,6 @@ The following information is passed to the server in the query: * [Create an NFS volume for Azure NetApp Files](azure-netapp-files-create-volumes.md) * [Create and manage Active Directory connections](create-active-directory-connections.md) * [Configure NFSv4.1 domain](azure-netapp-files-configure-nfsv41-domain.md#configure-nfsv41-domain) +* [Configure an NFS client for Azure NetApp Files](configure-nfs-clients.md) * [Troubleshoot volume errors for Azure NetApp Files](troubleshoot-volumes.md) * [Modify Active Directory connections for Azure NetApp Files](modify-active-directory-connections.md) diff --git a/articles/azure-netapp-files/configure-nfs-clients.md b/articles/azure-netapp-files/configure-nfs-clients.md index 70a6bd334bee..d5eb98e6f808 100644 --- a/articles/azure-netapp-files/configure-nfs-clients.md +++ b/articles/azure-netapp-files/configure-nfs-clients.md @@ -12,12 +12,12 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: how-to -ms.date: 09/22/2021 +ms.date: 05/27/2022 ms.author: anfdocs --- # Configure an NFS client for Azure NetApp Files -The NFS client configuration described in this article is part of the setup when you [configure NFSv4.1 Kerberos encryption](configure-kerberos-encryption.md) or [create a dual-protocol volume](create-volumes-dual-protocol.md). A wide variety of Linux distributions are available to use with Azure NetApp Files. This article describes configurations for two of the more commonly used environments: RHEL 8 and Ubuntu 18.04. +The NFS client configuration described in this article is part of the setup when you [configure NFSv4.1 Kerberos encryption](configure-kerberos-encryption.md) or [create a dual-protocol volume](create-volumes-dual-protocol.md) or [NFSv3/NFSv4.1 with LDAP](configure-ldap-extended-groups.md). 
A wide variety of Linux distributions are available to use with Azure NetApp Files. This article describes configurations for two of the more commonly used environments: RHEL 8 and Ubuntu 18.04. ## Requirements and considerations diff --git a/articles/azure-percept/azure-percept-devkit-software-release-notes.md b/articles/azure-percept/azure-percept-devkit-software-release-notes.md index cf4d6de20fb4..aae5f0b23713 100644 --- a/articles/azure-percept/azure-percept-devkit-software-release-notes.md +++ b/articles/azure-percept/azure-percept-devkit-software-release-notes.md @@ -15,6 +15,11 @@ This page provides information of changes and fixes for each Azure Percept DK OS To download the update images, refer to [Azure Percept DK software releases for USB cable update](./software-releases-usb-cable-updates.md) or [Azure Percept DK software releases for OTA update](./software-releases-over-the-air-updates.md). +## May (2205) Release + +- Operating System + - Latest security updates on BIND, Node.js, Cyrus SASL, libxml2, and OpenSSL packages. 
+ ## March (2203) Release - Operating System diff --git a/articles/azure-percept/software-releases-usb-cable-updates.md b/articles/azure-percept/software-releases-usb-cable-updates.md index fc2ea5ab8e6e..7b4ea5e7e2d0 100644 --- a/articles/azure-percept/software-releases-usb-cable-updates.md +++ b/articles/azure-percept/software-releases-usb-cable-updates.md @@ -23,7 +23,7 @@ This page provides information and download links for all the dev kit OS/firmwar ## Latest releases - **Latest service release** -March Service Release (2203): [Azure-Percept-DK-1.0.20220310.1223-public_preview_1.0.zip]() +May Service Release (2205): [Azure-Percept-DK-1.0.20220511.1756-public_preview_1.0.zip]() - **Latest major update or known stable version** Feature Update (2104): [Azure-Percept-DK-1.0.20210409.2055.zip](https://download.microsoft.com/download/6/4/d/64d53e60-f702-432d-a446-007920a4612c/Azure-Percept-DK-1.0.20210409.2055.zip) @@ -31,6 +31,7 @@ Feature Update (2104): [Azure-Percept-DK-1.0.20210409.2055.zip](https://download |Release|Download Links|Note| |---|---|:---:| +|May Service Release (2205)|[Azure-Percept-DK-1.0.20220511.1756-public_preview_1.0.zip]()|| |March Service Release (2203)|[Azure-Percept-DK-1.0.20220310.1223-public_preview_1.0.zip]()|| |February Service Release (2202)|[Azure-Percept-DK-1.0.20220209.1156-public_preview_1.0.zip]()|| |January Service Release (2201)|[Azure-Percept-DK-1.0.20220112.1519-public_preview_1.0.zip]()|| diff --git a/articles/azure-signalr/signalr-howto-move-across-regions.md b/articles/azure-signalr/signalr-howto-move-across-regions.md index 18c6f414323e..1b98c008cdd4 100644 --- a/articles/azure-signalr/signalr-howto-move-across-regions.md +++ b/articles/azure-signalr/signalr-howto-move-across-regions.md @@ -1,47 +1,50 @@ --- -title: Move an Azure SignalR resource to another region | Microsoft Docs -description: Shows you how to move an Azure SignalR resource to another region. 
+title: Move an Azure SignalR resource to another region +description: Learn how to use an Azure Resource Manager template to export the configuration of an Azure SignalR resource to a different Azure region. author: vicancy ms.service: signalr ms.topic: how-to -ms.date: 12/22/2021 +ms.date: 05/23/2022 ms.author: lianwei -ms.custom: subject-moving-resources +ms.custom: +- subject-moving-resources +- kr2b-contr-experiment --- # Move an Azure SignalR resource to another region -There are various scenarios in which you'd want to move your existing SignalR resource from one region to another. **Azure SignalR resource are region specific and can't be moved from one region to another.** You can however, use an Azure Resource Manager template to export the existing configuration of an Azure SignalR resource, modify the parameters to match the destination region, and then create a copy of your SignalR resource in another region. For more information on Resource Manager and templates, see [Quickstart: Create and deploy Azure Resource Manager templates by using the Azure portal](../azure-resource-manager/templates/quickstart-create-templates-use-the-portal.md). +Azure SignalR resources are region specific and can't be moved from one region to another. There are, however, scenarios where you might want to move your existing SignalR resource to another region. -## Prerequisites - -- Ensure that the service and features that your are using are supported in the target region. +You can use an Azure Resource Manager template to export the existing configuration of an Azure SignalR resource, modify the parameters to match the destination region, and then create a copy of your SignalR resource in another region. For more information on Resource Manager and templates, see [Quickstart: Create and deploy Azure Resource Manager templates by using the Azure portal](../azure-resource-manager/templates/quickstart-create-templates-use-the-portal.md). 
-- Verify that your Azure subscription allows you to create SignalR resource in the target region that's used. Contact support to enable the required quota. +## Prerequisites +- Ensure that the service and features that you're using are supported in the target region. +- Verify that your Azure subscription allows you to create SignalR resource in the target region that's used. +- Contact support to enable the required quota. - For preview features, ensure that your subscription is allowlisted for the target region. -## Prepare and move +## Prepare and move your SignalR resource To get started, export, and then modify a Resource Manager template. -### Export the template and deploy from the Portal +### Export the template and deploy from the Azure portal The following steps show how to prepare the SignalR resource move using a Resource Manager template, and move it to the target region using the portal. -1. Sign in to the [Azure portal](https://portal.azure.com) > **Resource Groups**. +1. Sign in to the [Azure portal](https://portal.azure.com). -2. Locate the Resource Group that contains the source SignalR resource and click on it. +1. Select **Resource Groups**. Locate the resource group that contains the source SignalR resource and select it. -3. Select > **Automation** > **Export template**. +1. Under **Automation**, select **Export template**. -4. Choose **Deploy** in the **Export template** blade. +1. Select **Deploy**. -5. Click **TEMPLATE** > **Edit parameters** to open the **parameters.json** file in the online editor. +1. Select **TEMPLATE** > **Edit parameters** to open the *parameters.json* file in the online editor. -6. To edit the parameter of the SignalR resource name, change the **value** property under **parameters**: +1. To edit the parameter of the SignalR resource name, change the `value` property under `parameters`: ```json { @@ -55,13 +58,13 @@ The following steps show how to prepare the SignalR resource move using a Resour } ``` -7. 
Change the value in the editor to a name of your choice for the target SignalR resource. Ensure you enclose the name in quotes. +1. Change the value in the editor to a name of your choice for the target SignalR resource. Ensure you enclose the name in quotes. -8. Click **Save** in the editor. +1. Select **Save** in the editor. -9. Click **TEMPLATE** > **Edit template** to open the **template.json** file in the online editor. +1. Select **TEMPLATE** > **Edit template** to open the *template.json* file in the online editor. -10. To edit the target region, change the **location** property under **resources** in the online editor: +1. To edit the target region, change the `location` property under `resources` in the online editor: ```json "resources": [ @@ -77,20 +80,19 @@ The following steps show how to prepare the SignalR resource move using a Resour ``` -11. To obtain region location codes, see [Azure SignalR Locations](https://azure.microsoft.com/global-infrastructure/services/?products=signalr-service). The code for a region is the region name with no spaces, **Central US** = **centralus**. - -12. You can also change other parameters in the template if you choose, and are optional depending on your requirements. +1. To obtain region location codes, see [Azure SignalR Locations](https://azure.microsoft.com/global-infrastructure/services/?products=signalr-service). The code for a region is the region name with no spaces, **Central US** = **centralus**. -13. Click **Save** in the online editor. +1. You can also change other parameters in the template if you choose, and are optional depending on your requirements. -14. Click **BASICS** > **Subscription** to choose the subscription where the target resource will be deployed. +1. Select **Save** in the online editor. -15. Click **BASICS** > **Resource group** to choose the resource group where the target resource will be deployed. You can click **Create new** to create a new resource group for the target resource. 
Ensure the name isn't the same as the source resource group of the existing resource. +1. Select **BASICS** > **Subscription** to choose the subscription where the target resource will be deployed. -16. Verify **BASICS** > **Location** is set to the target location where you wish for the resource to be deployed. +1. Select **BASICS** > **Resource group** to choose the resource group where the target resource will be deployed. You can select **Create new** to create a new resource group for the target resource. Ensure the name isn't the same as the source resource group of the existing resource. -17. Click the **Review + create** button to deploy the target Azure SignalR resource. +1. Verify **BASICS** > **Location** is set to the target location where you wish for the resource to be deployed. +1. Select **Review + create** to deploy the target Azure SignalR resource. ### Export the template and deploy using Azure PowerShell @@ -102,14 +104,14 @@ To export a template by using PowerShell: Connect-AzAccount ``` -2. If your identity is associated with more than one subscription, then set your active subscription to subscription of the SignalR resource that you want to move. +1. If your identity is associated with more than one subscription, then set your active subscription to subscription of the SignalR resource that you want to move. ```azurepowershell-interactive $context = Get-AzSubscription -SubscriptionId Set-AzContext $context ``` -3. Export the template of your source SignalR resource. These commands save a json template to your current directory. +1. Export the template of your source SignalR resource. These commands save a JSON template to your current directory. ```azurepowershell-interactive $resource = Get-AzResource ` @@ -122,14 +124,14 @@ To export a template by using PowerShell: -IncludeParameterDefaultValue ``` -4. The file downloaded will be named after the resource group the resource was exported from. 
Locate the file that was exported from the command named **\.json** and open it in an editor of your choice: - +1. The file downloaded will be named after the resource group the resource was exported from. Locate the file that was exported from the command named *\.json* and open it in an editor of your choice: + ```azurepowershell notepad .json ``` -5. To edit the parameter of the SignalR resource name, change the property **defaultValue** of the source SignalR resource name to the name of your target SignalR resource, ensure the name is in quotes: - +1. To edit the parameter of the SignalR resource name, change the property `defaultValue` of the source SignalR resource name to the name of your target SignalR resource. Ensure the name is in quotes: + ```json { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", @@ -143,7 +145,7 @@ To export a template by using PowerShell: } ``` -6. To edit the target region where the SignalR resource will be moved, change the **location** property under resources: +1. To edit the target region where the SignalR resource will be moved, change the `location` property under `resources`: ```json "resources": [ @@ -158,49 +160,46 @@ To export a template by using PowerShell: ] ``` -7. To obtain region location codes, see [Azure SignalR Locations](https://azure.microsoft.com/global-infrastructure/services/?products=signalr-service). The code for a region is the region name with no spaces, **Central US** = **centralus**. +1. To obtain region location codes, see [Azure SignalR Locations](https://azure.microsoft.com/global-infrastructure/services/?products=signalr-service). The code for a region is the region name with no spaces, **Central US** = **centralus**. + + You can also change other parameters in the template if you choose, depending on your requirements. -8. You can also change other parameters in the template if you choose, and are optional depending on your requirements. +1. 
Save the *\.json* file. -9. Save the **\.json** file. +1. Create a resource group in the target region for the target SignalR resource to be deployed using [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup). -10. Create a resource group in the target region for the target SignalR resource to be deployed using [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup). - ```azurepowershell-interactive New-AzResourceGroup -Name -location ``` -11. Deploy the edited **\.json** file to the resource group created in the previous step using [New-AzResourceGroupDeployment](/powershell/module/az.resources/new-azresourcegroupdeployment): +1. Deploy the edited *\.json* file to the resource group created in the previous step using [New-AzResourceGroupDeployment](/powershell/module/az.resources/new-azresourcegroupdeployment): ```azurepowershell-interactive New-AzResourceGroupDeployment -ResourceGroupName -TemplateFile .json ``` -12. To verify the resources were created in the target region, use [Get-AzResourceGroup](/powershell/module/az.resources/get-azresourcegroup) and [Get-AzSignalR](/powershell/module/az.signalr/get-azsignalr): - - ```azurepowershell-interactive - Get-AzResourceGroup -Name - ``` +1. To verify that the resources were created in the target region, use [Get-AzResourceGroup](/powershell/module/az.resources/get-azresourcegroup) and [Get-AzSignalR](/powershell/module/az.signalr/get-azsignalr): ```azurepowershell-interactive + Get-AzResourceGroup -Name Get-AzSignalR -Name -ResourceGroupName ``` -## Discard - -After the deployment, if you wish to start over or discard the SignalR resource in the target, delete the resource group that was created in the target and the moved SignalR resource will be deleted. To do so, select the resource group from your dashboard in the portal and select **Delete** at the top of the overview page. 
Alternatively you can use [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup): - -```azurepowershell-interactive -Remove-AzResourceGroup -Name -``` +> [!NOTE] +> +> After the deployment, if you wish to start over or discard the SignalR resource in the target, delete the resource group that was created in the target, which deletes the moved SignalR resource. To do so, select the resource group from your dashboard in the portal and select **Delete** at the top of the overview page. Alternatively you can use [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup): +> +> ```azurepowershell-interactive +> Remove-AzResourceGroup -Name +> ``` -## Clean up +## Clean up source region To commit the changes and complete the move of the SignalR resource, delete the source SignalR resource or resource group. To do so, select the SignalR resource or resource group from your dashboard in the portal and select **Delete** at the top of each page. ## Next steps -In this tutorial, you moved an Azure SignalR resource from one region to another and cleaned up the source resources. To learn more about moving resources between regions and disaster recovery in Azure, refer to: +In this tutorial, you moved an Azure SignalR resource from one region to another and cleaned up the source resources. 
To learn more about moving resources between regions and disaster recovery in Azure, see: - [Move resources to a new resource group or subscription](../azure-resource-manager/management/move-resource-group-and-subscription.md) - [Move Azure VMs to another region](../site-recovery/azure-to-azure-tutorial-migrate.md) diff --git a/articles/azure-video-analyzer/video-analyzer-docs/edge/deploy-iot-edge-linux-on-windows.md b/articles/azure-video-analyzer/video-analyzer-docs/edge/deploy-iot-edge-linux-on-windows.md index f1b55a2d27d9..0dc81aa2be2f 100644 --- a/articles/azure-video-analyzer/video-analyzer-docs/edge/deploy-iot-edge-linux-on-windows.md +++ b/articles/azure-video-analyzer/video-analyzer-docs/edge/deploy-iot-edge-linux-on-windows.md @@ -93,7 +93,7 @@ The following depicts the overall flow of the document and in 5 simple steps you ## Next steps * Try motion detection along with recording relevant videos in the Cloud. Follow the steps from the [detect motion and record video clips](detect-motion-record-video-edge-devices.md) quickstart. -* Use our [VS Code extension](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.live-video-analytics-edge) to view additional pipelines. +* Use our [VS Code extension](https://marketplace.visualstudio.com/vscode) to view additional pipelines. * Use an [IP camera](https://en.wikipedia.org/wiki/IP_camera) that supports RTSP instead of using the RTSP simulator. You can find IP cameras that support RTSP on the [ONVIF conformant products](https://www.onvif.org/conformant-products/) page. Look for devices that conform with profiles G, S, or T. * Run [AI on Live Video](analyze-live-video-use-your-model-http.md#overview) (you can skip the prerequisite setup as it has already been done above). 
diff --git a/articles/azure-video-indexer/audio-effects-detection.md b/articles/azure-video-indexer/audio-effects-detection.md index f75106fe6765..af2146e7a88c 100644 --- a/articles/azure-video-indexer/audio-effects-detection.md +++ b/articles/azure-video-indexer/audio-effects-detection.md @@ -18,7 +18,7 @@ Some scenarios where this feature is useful: ## Supported audio categories -**Audio effect detection** can detect and classify 7 different categories. In the next table, you can find the different categories split in to the different presets, divided to **Standard** and **Advanced**. For more information, see [pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +**Audio effect detection** can detect and classify 7 different categories. In the next table, you can find the different categories split into the different presets, divided into **Standard** and **Advanced**. For more information, see [pricing](https://azure.microsoft.com/pricing/details/media-services/). |Indexing type |Standard indexing| Advanced indexing| |---|---|---| diff --git a/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md b/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md index 375f33604b8c..0b303ee016c5 100644 --- a/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md +++ b/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md @@ -19,7 +19,7 @@ Currently, there is an overlap between features offered by the [Azure Video Inde |---|---|---| |Media Insights|[Enhanced](video-indexer-output-json-v2.md) |[Fundamentals](/azure/media-services/latest/analyze-video-audio-files-concept)| |Experiences|See the full list of supported features:&#xA;
    [Overview](video-indexer-overview.md)|Returns video insights only| -|Billing|[Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/#analytics)|[Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/#analytics)| +|Billing|[Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/#analytics) |[Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/#analytics) | |Compliance|For the most current compliance updates, visit [Azure Compliance Offerings.pdf](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942/file/178110/23/Microsoft%20Azure%20Compliance%20Offerings.pdf) and search for "Azure Video Indexer" to see if it complies with a certificate of interest.|For the most current compliance updates, visit [Azure Compliance Offerings.pdf](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942/file/178110/23/Microsoft%20Azure%20Compliance%20Offerings.pdf) and search for "Media Services" to see if it complies with a certificate of interest.| |Free Trial|East US|Not available| |Region availability|See [Cognitive Services availability by region](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services)|See [Media Services availability by region](https://azure.microsoft.com/global-infrastructure/services/?products=media-services).| diff --git a/articles/azure-video-indexer/considerations-when-use-at-scale.md b/articles/azure-video-indexer/considerations-when-use-at-scale.md index aee846af7b7a..d29470add6ad 100644 --- a/articles/azure-video-indexer/considerations-when-use-at-scale.md +++ b/articles/azure-video-indexer/considerations-when-use-at-scale.md @@ -43,7 +43,7 @@ To see an example of how to upload videos using URL, check out [this example](up ## Automatic Scaling of Media Reserved Units -Starting August 1st 2021, Azure Video Indexer enabled [Reserved 
Units](/azure/azure/media-services/latest/concept-media-reserved-units)(MRUs) auto scaling by [Azure Media Services](/azure/azure/media-services/latest/media-services-overview) (AMS), as a result you do not need to manage them through Azure Video Indexer. That will allow price optimization, e.g. price reduction in many cases, based on your business needs as it is being auto scaled. +Starting August 1st 2021, Azure Video Indexer enabled [Reserved Units](/azure/media-services/latest/concept-media-reserved-units)(MRUs) auto scaling by [Azure Media Services](/azure/media-services/latest/media-services-overview) (AMS), as a result you do not need to manage them through Azure Video Indexer. That will allow price optimization, e.g. price reduction in many cases, based on your business needs as it is being auto scaled. ## Respect throttling diff --git a/articles/azure-video-indexer/deploy-with-arm-template.md b/articles/azure-video-indexer/deploy-with-arm-template.md index e1a248589591..99ac42e2435b 100644 --- a/articles/azure-video-indexer/deploy-with-arm-template.md +++ b/articles/azure-video-indexer/deploy-with-arm-template.md @@ -20,7 +20,7 @@ The resource will be deployed to your subscription and will create the Azure Vid ## Prerequisites -* An Azure Media Services (AMS) account. You can create one for free through the [Create AMS Account](/azure/azure/media-services/latest/account-create-how-to). +* An Azure Media Services (AMS) account. You can create one for free through the [Create AMS Account](/azure/media-services/latest/account-create-how-to). ## Deploy the sample @@ -52,7 +52,7 @@ The resource will be deployed to your subscription and will create the Azure Vid ``` > [!NOTE] -> If you would like to work with bicep format, inspect the [bicep file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Samples/Create-Account/avam.template.bicep) on this repo. 
+> If you would like to work with bicep format, inspect the [bicep file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Quick-Start/avam.template.bicep) on this repo. ## Parameters @@ -106,4 +106,4 @@ If you're new to template deployment, see: ## Next steps -[Connect an existing classic paid Azure Video Indexer account to ARM-based account](connect-classic-account-to-arm.md) \ No newline at end of file +[Connect an existing classic paid Azure Video Indexer account to ARM-based account](connect-classic-account-to-arm.md) diff --git a/articles/azure-video-indexer/faq.yml b/articles/azure-video-indexer/faq.yml index 33e0fef9e0ba..2547d15b14b2 100644 --- a/articles/azure-video-indexer/faq.yml +++ b/articles/azure-video-indexer/faq.yml @@ -2,7 +2,7 @@ metadata: title: Frequently asked questions about Azure Video Indexer - Azure description: This article gives answers to frequently asked questions about Azure Video Indexer. - services: azure-video-analyzer + services: azure-video-indexer author: Juliako manager: femila ms.topic: faq @@ -37,7 +37,7 @@ sections: answer: | Azure Video Indexer includes a free trial offering that provides you with 600 minutes in the web-based interface and 2,400 minutes via the API. You can [login to the Azure Video Indexer web-based interface](https://www.videoindexer.ai/) and try it for yourself using any web identity and without having to set up an Azure Subscription. Follow [this easy introduction lab](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/IntroToVideoIndexer.md) to get better idea of how to use Azure Video Indexer. - To index videos and audio flies at scale, you can connect Azure Video Indexer to a paid Microsoft Azure subscription. You can find more information on pricing on the [pricing](https://azure.microsoft.com/pricing/details/cognitive-services/video-indexer/) page. 
+ To index videos and audio files at scale, you can connect Azure Video Indexer to a paid Microsoft Azure subscription. You can find more information on pricing on the [pricing](https://azure.microsoft.com/pricing/details/video-indexer/) page. You can find more information on getting started in [Get started](video-indexer-get-started.md). @@ -95,7 +95,7 @@ sections: - question: What is the SLA for Azure Video Indexer? answer: | - Azure Media Service’s SLA covers Azure Video Indexer and can be found on the [SLA](https://azure.microsoft.com/support/legal/sla/azure/media-services/v1_2/) page. The SLA only applies to Azure Video Indexer paid accounts and does not apply to the free trial. + Azure Media Service’s SLA covers Azure Video Indexer and can be found on the [SLA](https://azure.microsoft.com/support/legal/sla/media-services/v1_2/) page. The SLA only applies to Azure Video Indexer paid accounts and does not apply to the free trial. - name: Privacy Questions questions: diff --git a/articles/azure-video-indexer/manage-account-connected-to-azure.md b/articles/azure-video-indexer/manage-account-connected-to-azure.md index 56deb8a7884d..def37b0b3735 100644 --- a/articles/azure-video-indexer/manage-account-connected-to-azure.md +++ b/articles/azure-video-indexer/manage-account-connected-to-azure.md @@ -64,7 +64,7 @@ If your account needs some adjustments, you see relevant errors and warnings abo * Media reserved units - You must allocate Media Reserved Units on your Media Service resource in order to index videos. For optimal indexing performance, it's recommended to allocate at least 10 S3 Reserved Units. For pricing information, see the FAQ section of the [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/) page. + You must allocate Media Reserved Units on your Media Service resource in order to index videos. For optimal indexing performance, it's recommended to allocate at least 10 S3 Reserved Units. 
For pricing information, see the FAQ section of the [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/) page. ## Next steps diff --git a/articles/azure-video-indexer/odrv-download.md b/articles/azure-video-indexer/odrv-download.md index 14b4394d46bf..5802a6882569 100644 --- a/articles/azure-video-indexer/odrv-download.md +++ b/articles/azure-video-indexer/odrv-download.md @@ -11,7 +11,7 @@ This article shows how to index videos stored on OneDrive by using the Azure Vid ## Supported file formats -For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/azure/media-services/latest/encode-media-encoder-standard-formats-reference). +For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/media-services/latest/encode-media-encoder-standard-formats-reference). ## Index a video by using the website @@ -91,7 +91,7 @@ Use this parameter to define an AI bundle that you want to apply on your audio o Azure Video Indexer covers up to two tracks of audio. If the file has more audio tracks, they're treated as one track. If you want to index the tracks separately, you need to extract the relevant audio file and index it as `AudioOnly`. -Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). #### priority @@ -108,7 +108,7 @@ When you're using the [Upload Video](https://api-portal.videoindexer.ai/api-deta After the indexing and encoding jobs are done, the video is published so you can also stream your video. The streaming endpoint from which you want to stream the video must be in the **Running** state. 
For `SingleBitrate`, the standard encoder cost will apply for the output. If the video height is greater than or equal to 720, Azure Video Indexer encodes it as 1280 x 720. Otherwise, it's encoded as 640 x 468. -The default setting is [content-aware encoding](/azure/azure/media-services/latest/encode-content-aware-concept). +The default setting is [content-aware encoding](/azure/media-services/latest/encode-content-aware-concept). If you only want to index your video and not encode it, set `streamingPreset` to `NoStreaming`. diff --git a/articles/azure-video-indexer/release-notes.md b/articles/azure-video-indexer/release-notes.md index 08ccf1a919a5..9f80cecf6b05 100644 --- a/articles/azure-video-indexer/release-notes.md +++ b/articles/azure-video-indexer/release-notes.md @@ -100,8 +100,7 @@ Azure Video Indexer website is now supporting account management based on ARM in ### Leverage open-source code to create ARM based account -Added new code samples including HTTP calls to use Azure Video Indexer create, read, update and delete (CRUD) ARM API for solution developers. See [this sample](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ARM-Samples/Create-Account -). +Added new code samples including HTTP calls to use Azure Video Indexer create, read, update and delete (CRUD) ARM API for solution developers. See [this sample](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ARM-Quick-Start). ## January 2022 @@ -182,7 +181,7 @@ Fixed bugs related to CSS, theming and accessibility: ### Automatic Scaling of Media Reserved Units -Starting August 1st 2021, Azure Video Indexer enabled [Media Reserved Units (MRUs)](/azure/azure/media-services/latest/concept-media-reserved-units) auto scaling by [Azure Media Services](/azure/azure/media-services/latest/media-services-overview), as a result you do not need to manage them through Azure Video Indexer. 
That will allow price optimization, for example price reduction in many cases, based on your business needs as it is being auto scaled. +Starting August 1st 2021, Azure Video Indexer enabled [Media Reserved Units (MRUs)](/azure/media-services/latest/concept-media-reserved-units) auto scaling by [Azure Media Services](/azure/media-services/latest/media-services-overview), as a result you do not need to manage them through Azure Video Indexer. That will allow price optimization, for example price reduction in many cases, based on your business needs as it is being auto scaled. ## June 2021 @@ -249,7 +248,7 @@ You can now see the detected acoustic events in the closed captions file. The fi ### Audio analysis -Audio analysis is available now in additional new bundle of audio features at different price point. The new **Basic Audio** analysis preset provides a low-cost option to only extract speech transcription, translation and format output captions and subtitles. The **Basic Audio** preset will produce two separate meters on your bill, including a line for transcription and a separate line for caption and subtitle formatting. More information on the pricing, see the [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/) page. +Audio analysis is available now in additional new bundle of audio features at different price point. The new **Basic Audio** analysis preset provides a low-cost option to only extract speech transcription, translation and format output captions and subtitles. The **Basic Audio** preset will produce two separate meters on your bill, including a line for transcription and a separate line for caption and subtitle formatting. More information on the pricing, see the [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/) page. 
The newly added bundle is available when indexing or re-indexing your file by choosing the **Advanced option** -> **Basic Audio** preset (under the **Video + audio indexing** drop-down box). diff --git a/articles/azure-video-indexer/toc.yml b/articles/azure-video-indexer/toc.yml index 22575b6d053d..6253ac5408c2 100644 --- a/articles/azure-video-indexer/toc.yml +++ b/articles/azure-video-indexer/toc.yml @@ -133,7 +133,7 @@ - name: Azure Roadmap href: https://azure.microsoft.com/roadmap/?category=web-mobile - name: Pricing - href: https://azure.microsoft.com/en-us/pricing/details/video-indexer/ + href: https://azure.microsoft.com/pricing/details/video-indexer/ - name: Regional availability href: https://azure.microsoft.com/global-infrastructure/services/ - name: Regions diff --git a/articles/azure-video-indexer/upload-index-videos.md b/articles/azure-video-indexer/upload-index-videos.md index a22eb519ecdc..2cdedd3d83fd 100644 --- a/articles/azure-video-indexer/upload-index-videos.md +++ b/articles/azure-video-indexer/upload-index-videos.md @@ -15,7 +15,7 @@ When you're creating an Azure Video Indexer account, you choose between: - A free trial account. Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2,400 minutes of free indexing to API users. - A paid option where you're not limited by a quota. You create an Azure Video Indexer account that's [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for indexed minutes. -For more information about account types, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +For more information about account types, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). 
After you upload and index a video, you can use [Azure Video Indexer website](video-indexer-view-edit.md) or [Azure Video Indexer Developer Portal](video-indexer-use-apis.md) to see the insights of the video (see [Examine the Azure Video Indexer output](video-indexer-output-json-v2.md)). @@ -23,7 +23,7 @@ When you're uploading videos by using the API, you have the following options: * Upload your video from a URL (preferred). * Send the video file as a byte array in the request body. -* Use existing an Azure Media Services asset by providing the [asset ID](/azure/azure/media-services/latest/assets-concept). This option is supported in paid accounts only. +* Use an existing Azure Media Services asset by providing the [asset ID](/azure/media-services/latest/assets-concept). This option is supported in paid accounts only. ## Supported file formats @@ -91,7 +91,7 @@ Use this parameter to define an AI bundle that you want to apply on your audio o Azure Video Indexer covers up to two tracks of audio. If the file has more audio tracks, they're treated as one track. If you want to index the tracks separately, you need to extract the relevant audio file and index it as `AudioOnly`. -Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). #### priority @@ -108,7 +108,7 @@ When you're using the [Upload Video](https://api-portal.videoindexer.ai/api-deta After the indexing and encoding jobs are done, the video is published so you can also stream your video. The streaming endpoint from which you want to stream the video must be in the **Running** state. For `SingleBitrate`, the standard encoder cost will apply for the output. 
If the video height is greater than or equal to 720, Azure Video Indexer encodes it as 1280 x 720. Otherwise, it's encoded as 640 x 468. -The default setting is [content-aware encoding](/azure/azure/media-services/latest/encode-content-aware-concept). +The default setting is [content-aware encoding](/azure/media-services/latest/encode-content-aware-concept). If you only want to index your video and not encode it, set `streamingPreset` to `NoStreaming`. diff --git a/articles/azure-video-indexer/video-indexer-get-started.md b/articles/azure-video-indexer/video-indexer-get-started.md index f1aa77a45f86..0cb23060c130 100644 --- a/articles/azure-video-indexer/video-indexer-get-started.md +++ b/articles/azure-video-indexer/video-indexer-get-started.md @@ -11,7 +11,7 @@ ms.custom: mode-other This getting started quickstart shows how to sign in to the Azure Video Indexer website and how to upload your first video. -When creating an Azure Video Indexer account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you aren't limited by the quota). With free trial, Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. With paid option, you create an Azure Video Indexer account that is [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +When creating an Azure Video Indexer account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you aren't limited by the quota). With free trial, Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. 
With paid option, you create an Azure Video Indexer account that is [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). ## Sign up for Azure Video Indexer diff --git a/articles/azure-video-indexer/video-indexer-output-json-v2.md b/articles/azure-video-indexer/video-indexer-output-json-v2.md index bbdb6ca025b2..c0d7901eeb7a 100644 --- a/articles/azure-video-indexer/video-indexer-output-json-v2.md +++ b/articles/azure-video-indexer/video-indexer-output-json-v2.md @@ -13,7 +13,8 @@ ms.author: juliako When a video is indexed, Azure Video Indexer produces the JSON content that contains details of the specified video insights. The insights include transcripts, optical character recognition elements (OCRs), faces, topics, blocks, and similar details. Each insight type includes instances of time ranges that show when the insight appears in the video. -The produced JSON output contains `Insights` and `SummarizedInsights` elements. We highly recommend using `Insights` and not using `SummarizedInsights` (which is present for backward compatibility). +> [!TIP] +> The produced JSON output contains `Insights` and `SummarizedInsights` elements. We highly recommend using `Insights` and not using `SummarizedInsights` (which is present for backward compatibility). To visually examine the video's insights, press the **Play** button on the video on the [Azure Video Indexer](https://www.videoindexer.ai/) website. @@ -87,6 +88,9 @@ To get insights produced by the API: This section shows a summary of the insights. +> [!TIP] +> The produced JSON output contains `Insights` and `SummarizedInsights` elements. We highly recommend using `Insights` and not using `SummarizedInsights` (which is present for backward compatibility). + |Attribute | Description| |---|---| |`name`|The name of the video. 
For example: `Azure Monitor`.| diff --git a/articles/azure-video-indexer/video-indexer-overview.md b/articles/azure-video-indexer/video-indexer-overview.md index 750965a146ac..815e5a83f63f 100644 --- a/articles/azure-video-indexer/video-indexer-overview.md +++ b/articles/azure-video-indexer/video-indexer-overview.md @@ -10,6 +10,9 @@ ms.author: juliako [!INCLUDE [regulation](./includes/regulation.md)] +> [!NOTE] +> The service is now rebranded from Azure Video Analyzer for Media to **Azure Video Indexer**. Click [here](https://vi.microsoft.com) to read more. + Azure Video Indexer is a cloud application, part of Azure Applied AI Services, built on Azure Media Services and Azure Cognitive Services (such as the Face, Translator, Computer Vision, and Speech). It enables you to extract the insights from your videos using Azure Video Indexer video and audio models. To start extracting insights with Azure Video Indexer, you need to create an account and upload videos. When you upload your videos to Azure Video Indexer, it analyses both visuals and audio by running different AI models. As Azure Video Indexer analyzes your video, the insights that are extracted by the AI models. @@ -133,6 +136,7 @@ The following list shows the supported browsers that you can use for the Azure V You're ready to get started with Azure Video Indexer. For more information, see the following articles: +- [Pricing](https://azure.microsoft.com/pricing/details/video-indexer/) - [Get started with the Azure Video Indexer website](video-indexer-get-started.md). - [Process content with Azure Video Indexer REST API](video-indexer-use-apis.md). - [Embed visual widgets in your application](video-indexer-embed-widgets.md). 
diff --git a/articles/azure-video-indexer/video-indexer-use-apis.md b/articles/azure-video-indexer/video-indexer-use-apis.md index e2470bd3647d..ae728f69fddb 100644 --- a/articles/azure-video-indexer/video-indexer-use-apis.md +++ b/articles/azure-video-indexer/video-indexer-use-apis.md @@ -10,7 +10,7 @@ ms.custom: devx-track-csharp Azure Video Indexer consolidates various audio and video artificial intelligence (AI) technologies offered by Microsoft into one integrated service, making development simpler. The APIs are designed to enable developers to focus on consuming Media AI technologies without worrying about scale, global reach, availability, and reliability of cloud platforms. You can use the API to upload your files, get detailed video insights, get URLs of embeddable insight and player widgets, and more. -When creating an Azure Video Indexer account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you're not limited by the quota). With a free trial, Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. With a paid option, you create an Azure Video Indexer account that's [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +When creating an Azure Video Indexer account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you're not limited by the quota). With a free trial, Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. 
With a paid option, you create an Azure Video Indexer account that's [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). This article shows how the developers can take advantage of the [Azure Video Indexer API](https://api-portal.videoindexer.ai/). diff --git a/articles/azure-vmware/concepts-design-public-internet-access.md b/articles/azure-vmware/concepts-design-public-internet-access.md new file mode 100644 index 000000000000..8af4cd3bac66 --- /dev/null +++ b/articles/azure-vmware/concepts-design-public-internet-access.md @@ -0,0 +1,73 @@ +--- +title: Concept - Internet connectivity design considerations (Preview) +description: Options for Azure VMware Solution Internet Connectivity. +ms.topic: conceptual +ms.date: 5/12/2022 +--- +# Internet connectivity design considerations (Preview) + +There are three primary patterns for creating outbound access to the Internet from Azure VMware Solution and to enable inbound Internet access to resources on your Azure VMware Solution private cloud. + +- [Internet Service hosted in Azure](#internet-service-hosted-in-azure) +- [Azure VMware Solution Managed SNAT](#azure-vmware-solution-managed-snat) +- [Public IP to NSX edge](#public-ip-to-nsx-edge) + +Your requirements for security controls, visibility, capacity, and operations drive the selection of the appropriate method for delivery of Internet access to the Azure VMware Solution private cloud. + +## Internet Service hosted in Azure + +There are multiple ways to generate a default route in Azure and send it towards your Azure VMware Solution private cloud or on-premise. The options are as follows: + +- An Azure firewall in a Virtual WAN Hub. +- A third-party Network Virtual Appliance in a Virtual WAN Hub Spoke Virtual Network. 
+- A third-party Network Virtual Appliance in a Native Azure Virtual Network using Azure Route Server. +- A default route from on-premises transferred to Azure VMware Solution over Global Reach. + +Use any of these patterns to provide an outbound SNAT service with the ability to control what sources are allowed out, to view the connection logs, and for some services, do further traffic inspection. + +The same service can also consume an Azure Public IP and create an inbound DNAT from the Internet towards targets in Azure VMware Solution. + +An environment can also be built that utilizes multiple paths for Internet traffic. One for outbound SNAT (for example, a third-party security NVA), and another for inbound DNAT (like a third party Load balancer NVA using SNAT pools for return traffic). + +## Azure VMware Solution Managed SNAT + +A Managed SNAT service provides a simple method for outbound internet access from an Azure VMware Solution private cloud. Features of this service include the following. + +- Easily enabled – select the radio button on the Internet Connectivity tab and all workload networks will have immediate outbound access to the Internet through a SNAT gateway. +- No control over SNAT rules, all sources that reach the SNAT service are allowed. +- No visibility into connection logs. +- Two Public IPs are used and rotated to support up to 128k simultaneous outbound connections. +- No inbound DNAT capability is available with the Azure VMware Solution Managed SNAT. + +## Public IP to NSX edge + +This option brings an allocated Azure Public IP directly to the NSX Edge for consumption. It allows the Azure VMware Solution private cloud to directly consume and apply public network addresses in NSX as required. These addresses are used for the following types of connections: +- Outbound SNAT +- Inbound DNAT +- Load balancing using VMware AVI third-party Network Virtual Appliances +- Applications directly connected to a workload VM interface. 
+ +This option also lets you configure the public address on a third-party Network Virtual Appliance to create a DMZ within the Azure VMware Solution private cloud. + +Features include: + + - Scale – the soft limit of 64 public IPs can be increased by request to 1000s of Public IPs allocated if required by an application. + - Flexibility – A Public IP can be applied anywhere in the NSX ecosystem. It can be used to provide SNAT or DNAT, on load balancers like VMware’s AVI, or third-party Network Virtual Appliances. It can also be used on third-party Network Virtual Security Appliances on VMware segments or directly on VMs. + - Regionality – the Public IP to the NSX Edge is unique to the local SDDC. For “multi private cloud in distributed regions,” with local exit to Internet intentions, it’s much easier to direct traffic locally versus trying to control default route propagation for a security or SNAT service hosted in Azure. If you have two or more Azure VMware Solution private clouds connected with a Public IP configured, they can both have a local exit. + +## Considerations for selecting an option + +The option that you select depends on the following factors: + +- To add an Azure VMware private cloud to a security inspection point provisioned in Azure native that inspects all Internet traffic from Azure native endpoints, use an Azure native construct and leak a default route from Azure to your Azure VMware Solution private cloud. +- If you need to run a third-party Network Virtual Appliance to conform to existing standards for security inspection or streamlined opex, you have two options. You can run your Public IP in Azure native with the default route method or run it in Azure VMware Solution using Public IP to NSX edge. +- There are scale limits on how many Public IPs can be allocated to a Network Virtual Appliance running in native Azure or provisioned on Azure Firewall. 
The Public IP to NSX edge option allows for much higher allocations (1000s versus 100s). +- Use a Public IP to the NSX for a localized exit to the Internet from each private cloud in its local region. Using multiple Azure VMware Solution private clouds in several Azure regions that need to communicate with each other and the Internet, it can be challenging to match an Azure VMware Solution private cloud with a security service in Azure. The difficulty is due to the way a default route from Azure works. + +## Next Steps + +[Enable Managed SNAT for Azure VMware Solution Workloads](enable-managed-snat-for-workloads.md) + +[Enable Public IP to the NSX Edge for Azure VMware Solution (Preview)](enable-public-ip-nsx-edge.md) + +[Disable Internet access or enable a default route](disable-internet-access.md) \ No newline at end of file diff --git a/articles/azure-vmware/configure-identity-source-vcenter.md b/articles/azure-vmware/configure-identity-source-vcenter.md index 3a7dda21d73d..ae591794ee40 100644 --- a/articles/azure-vmware/configure-identity-source-vcenter.md +++ b/articles/azure-vmware/configure-identity-source-vcenter.md @@ -3,9 +3,6 @@ title: Configure external identity source for vCenter Server description: Learn how to configure Active Directory over LDAP or LDAPS for vCenter Server as an external identity source. ms.topic: how-to ms.date: 04/22/2022 - - - --- # Configure external identity source for vCenter Server diff --git a/articles/azure-vmware/deploy-disaster-recovery-using-jetstream.md b/articles/azure-vmware/deploy-disaster-recovery-using-jetstream.md index 7cfe146ee453..2b72114c7091 100644 --- a/articles/azure-vmware/deploy-disaster-recovery-using-jetstream.md +++ b/articles/azure-vmware/deploy-disaster-recovery-using-jetstream.md @@ -233,7 +233,7 @@ Once JetStream DR MSA and JetStream VIB are installed on the Azure VMware Soluti 1. 
[Select the VMs](https://www.jetstreamsoft.com/portal/jetstream-knowledge-base/select-vms-for-protection/) you want to protect and then [start VM protection](https://www.jetstreamsoft.com/portal/jetstream-knowledge-base/start-vm-protection/). -For remaining configuration steps for JetStream DR, such as creating a failover runbook, invoking failover to the DR site, and invoking failback to the primary site, see the [JetStream Admin Guide documentation](https://www.jetstreamsoft.com/portal/jetstream-article-categories/product-manual/). +For remaining configuration steps for JetStream DR, such as creating a failover runbook, invoking failover to the DR site, and invoking failback to the primary site, see the [JetStream Admin Guide documentation](https://docs.delphix.com/docs51/delphix-jet-stream/jet-stream-admin-guide). ## Disable JetStream DR on an Azure VMware Solution cluster diff --git a/articles/azure-vmware/deploy-zerto-disaster-recovery.md b/articles/azure-vmware/deploy-zerto-disaster-recovery.md index 7d01fc90ce47..8ba7ca3852d7 100644 --- a/articles/azure-vmware/deploy-zerto-disaster-recovery.md +++ b/articles/azure-vmware/deploy-zerto-disaster-recovery.md @@ -123,7 +123,7 @@ You can reuse pre-existing Zerto product licenses for Azure VMware Solution envi ### How is Zerto supported? -Zerto disaster recovery is a solution that is sold and supported by Zerto. For any support issue with Zerto disaster recovery, always contact [Zerto support](https://www.zerto.com/company/support-and-service/support/). +Zerto disaster recovery is a solution that is sold and supported by Zerto. For any support issue with Zerto disaster recovery, always contact [Zerto support](https://www.zerto.com/support-and-services/). Zerto and Microsoft support teams will engage each other as needed to troubleshoot Zerto disaster recovery issues on Azure VMware Solution. 
diff --git a/articles/azure-vmware/disable-internet-access.md b/articles/azure-vmware/disable-internet-access.md new file mode 100644 index 000000000000..d507ed5b4bc3 --- /dev/null +++ b/articles/azure-vmware/disable-internet-access.md @@ -0,0 +1,34 @@ +--- +title: Disable internet access or enable a default route +description: This article explains how to disable internet access for Azure VMware Solution and enable default route for Azure VMware Solution. +ms.topic: how-to +ms.date: 05/12/2022 +--- +# Disable internet access or enable a default route +In this article, you'll learn how to disable Internet access or enable a default route for your Azure VMware Solution private cloud. There are multiple ways to set up a default route. You can use a Virtual WAN hub, Network Virtual Appliance in a Virtual Network, or use a default route from on-premises. If you don't set up a default route, there will be no Internet access to your Azure VMware Solution private cloud. + +With a default route setup, you can achieve the following tasks: +- Disable Internet access to your Azure VMware Solution private cloud. + + > [!Note] + > Ensure that a default route is not advertised from on-premises or Azure as that will override this setup. + +- Enable Internet access by generating a default route from Azure Firewall or third-party Network Virtual Appliance. +## Prerequisites +- If Internet access is required, a default route must be advertised from an Azure Firewall, Network Virtual Appliance or Virtual WAN Hub. +- Azure VMware Solution private cloud. +## Disable Internet access or enable a default route in the Azure portal +1. Log in to the Azure portal. +1. Search for **Azure VMware Solution** and select it. +1. Locate and select your Azure VMware Solution private cloud. +1. On the left navigation, under **Workload networking**, select **Internet connectivity**. +1. Select the **Don't connect or connect using default route from Azure** button and select **Save**. 
+If you don't have a default route from on-premises or from Azure, you have successfully disabled Internet connectivity to your Azure VMware Solution private cloud. + +## Next steps + +[Internet connectivity design considerations (Preview)](concepts-design-public-internet-access.md) + +[Enable Managed SNAT for Azure VMware Solution Workloads](enable-managed-snat-for-workloads.md) + +[Enable Public IP to the NSX Edge for Azure VMware Solution](enable-public-ip-nsx-edge.md) diff --git a/articles/azure-vmware/enable-managed-snat-for-workloads.md b/articles/azure-vmware/enable-managed-snat-for-workloads.md new file mode 100644 index 000000000000..d8d8176df6c3 --- /dev/null +++ b/articles/azure-vmware/enable-managed-snat-for-workloads.md @@ -0,0 +1,39 @@ +--- +title: Enable Managed SNAT for Azure VMware Solution Workloads +description: This article explains how to enable Managed SNAT for Azure VMware Solution Workloads. +ms.topic: how-to +ms.date: 05/12/2022 +--- +# Enable Managed SNAT for Azure VMware Solution workloads + +In this article, you'll learn how to enable Azure VMware Solution’s Managed Source NAT (SNAT) to connect to the Internet outbound. A SNAT service translates from RFC1918 space to the public Internet for simple outbound Internet access. The SNAT service won't work when you have a default route from Azure. + +With this capability, you: + +- Have a basic SNAT service with outbound Internet connectivity from your Azure VMware Solution private cloud. +- Have no control of outbound SNAT rules. +- Are unable to view connection logs. +- Have a limit of 128,000 concurrent connections. + +## Prerequisites +- Azure VMware Solution private cloud +- DNS Server configured on the NSX-T Datacenter + +## Reference architecture +The architecture shows Internet access to and from your Azure VMware Solution private cloud using a Public IP directly to the NSX Edge. 
+:::image type="content" source="media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png" alt-text="Diagram that shows architecture of Internet access to and from your Azure VMware Solution Private Cloud using a Public IP directly to the NSX Edge." border="false" lightbox="media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png"::: + +## Configure Outbound Internet access using Managed SNAT in the Azure portal + +1. Log in to the Azure portal and then search for and select **Azure VMware Solution**. +2. Select the Azure VMware Solution private cloud. +1. In the left navigation, under **Workload Networking**, select **Internet Connectivity**. +4. Select the **Connect using SNAT** button and select **Save**. + You have successfully enabled outbound Internet access for your Azure VMware Solution private cloud using our Managed SNAT service. + +## Next steps +[Internet connectivity design considerations (Preview)](concepts-design-public-internet-access.md) + +[Enable Public IP to the NSX Edge for Azure VMware Solution (Preview)](enable-public-ip-nsx-edge.md) + +[Disable Internet access or enable a default route](disable-internet-access.md) diff --git a/articles/azure-vmware/enable-public-internet-access.md b/articles/azure-vmware/enable-public-internet-access.md index 10ea7a8ef8a8..f16c02fe4bab 100644 --- a/articles/azure-vmware/enable-public-internet-access.md +++ b/articles/azure-vmware/enable-public-internet-access.md @@ -54,8 +54,6 @@ In this scenario, you'll publish the IIS webserver to the internet. Use the publ 1. Select the Azure VMware Solution private cloud. - :::image type="content" source="media/public-ip-usage/avs-private-cloud-resource.png" alt-text="Screenshot of the Azure VMware Solution private cloud." lightbox="media/public-ip-usage/avs-private-cloud-resource.png"::: - 1. Under **Manage**, select **Connectivity**. 
:::image type="content" source="media/public-ip-usage/avs-private-cloud-manage-menu.png" alt-text="Screenshot of the Connectivity section." lightbox="media/public-ip-usage/avs-private-cloud-manage-menu.png"::: @@ -142,7 +140,7 @@ Once all components are deployed, you can see them in the added Resource group. 1. Select a hub from the list and select **Add**. - :::image type="content" source="media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png" alt-text="Screenshot that shows the selected hubs that will be converted to Secured Virtual Hubs." lightbox="media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png"::: + :::image type="content" source="media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png" alt-text="Screenshot that shows the selected hubs that will be converted to Secured Virtual Hubs." lightbox="media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png"::: 1. Select **Next: Tags**. diff --git a/articles/azure-vmware/enable-public-ip-nsx-edge.md b/articles/azure-vmware/enable-public-ip-nsx-edge.md new file mode 100644 index 000000000000..253348a02c36 --- /dev/null +++ b/articles/azure-vmware/enable-public-ip-nsx-edge.md @@ -0,0 +1,123 @@ +--- +title: Enable Public IP to the NSX Edge for Azure VMware Solution (Preview) +description: This article explains how to enable internet access for your Azure VMware Solution. +ms.topic: how-to +ms.date: 05/12/2022 +--- +# Enable Public IP to the NSX Edge for Azure VMware Solution (Preview) + +In this article, you'll learn how to enable Public IP to the NSX Edge for your Azure VMware Solution. + +>[!TIP] +>Before you enable Internet access to your Azure VMware Solution, review the [Internet connectivity design considerations](concepts-design-public-internet-access.md). + +Public IP to the NSX Edge is a feature in Azure VMware Solution that enables inbound and outbound internet access for your Azure VMware Solution environment. 
The Public IP is configured in Azure VMware Solution through the Azure portal and the NSX-T Data center interface within your Azure VMware Solution private cloud. +With this capability, you have the following features: +- A cohesive and simplified experience for reserving and using a Public IP down to the NSX Edge. +- The ability to receive up to 1000 or more Public IPs, enabling Internet access at scale. +- Inbound and outbound internet access for your workload VMs. +- DDoS Security protection against network traffic in and out of the Internet. +- HCX Migration support over the Public Internet. + +## Reference architecture +The architecture shows Internet access to and from your Azure VMware Solution private cloud using a Public IP directly to the NSX Edge. +:::image type="content" source="media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png" alt-text="Diagram that shows architecture of Internet access to and from your Azure VMware Solution Private Cloud using a Public IP directly to the NSX Edge." border="false" lightbox="media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png"::: + +## Configure a Public IP in the Azure portal +1. Log in to the Azure portal. +1. Search for and select Azure VMware Solution. +2. Select the Azure VMware Solution private cloud. +1. In the left navigation, under **Workload Networking**, select **Internet connectivity**. +4. Select the **Connect using Public IP down to the NSX-T Edge** button. + +>[!TIP] +>Before selecting a Public IP, ensure you understand the implications to your existing environment. For more information, see [Internet connectivity design considerations](concepts-design-public-internet-access.md) + +5. Select **Public IP**. + :::image type="content" source="media/public-ip-nsx-edge/public-ip-internet-connectivity.png" alt-text="Diagram that shows how to select public IP to the NSX Edge"::: +6. 
Enter the **Public IP name** and select a subnet size from the **Address space** dropdown and select **Configure**. +7. This Public IP should be configured within 20 minutes and will show the subnet. + :::image type="content" source="media/public-ip-nsx-edge/public-ip-subnet-internet-connectivity.png" alt-text="Diagram that shows Internet connectivity in Azure VMware Solution."::: +1. If you don't see the subnet, refresh the list. If the refresh fails, try the configuration again. + +9. After configuring the Public IP, select the **Connect using the Public IP down to the NSX-T Edge** checkbox to disable all other Internet options. +10. Select **Save**. + +You have successfully enabled Internet connectivity for your Azure VMware Solution private cloud and reserved a Microsoft allocated Public IP. You can now configure this Public IP down to the NSX Edge for your workloads. The NSX-T Datacenter is used for all VM communication. There are several options for configuring your reserved Public IP down to the NSX Edge. + +There are three options for configuring your reserved Public IP down to the NSX Edge: Outbound Internet Access for VMs, Inbound Internet Access for VMs, and Gateway Firewall used to Filter Traffic to VMs at T1 Gateways. + +### Outbound Internet access for VMs + +A Sourced Network Translation Service (SNAT) with Port Address Translation (PAT) is used to allow many VMs to one SNAT service. This connection means you can provide Internet connectivity for many VMs. + +**Add rule** +1. From your Azure VMware Solution private cloud, select **vCenter Credentials** +2. Locate your NSX-T URL and credentials. +3. Log in to **VMWare NSX-T**. +4. Navigate to **NAT Rules**. +5. Select the T1 Router. +1. select **ADD NAT RULE**. + +**Configure rule** + +1. Enter a name. +1. Select **SNAT**. +1. Optionally enter a source such as a subnet to SNAT or destination. +1. Enter the translated IP. 
This IP is from the range of Public IPs you reserved from the Azure VMware Solution Portal. +1. Optionally give the rule a higher priority number. This prioritization will move the rule further down the rule list to ensure more specific rules are matched first. +1. Click **SAVE**. + +Logging can be enabled by way of the logging slider. For more information on NSX-T NAT configuration and options, see the +[NSX-T NAT Administration Guide](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/administration/GUID-7AD2C384-4303-4D6C-A44A-DEF45AA18A92.html) +### Inbound Internet Access for VMs +A Destination Network Translation Service (DNAT) is used to expose a VM on a specific Public IP address and/or a specific port. This service provides inbound internet access to your workload VMs. + +**Log in VMware NSX-T** +1. From your Azure VMware Solution private cloud, select **VMware credentials**. +2. Locate your NSX-T URL and credentials. +3. Log in to **VMware NSX-T**. + +**Configure the DNAT rule** + 1. Name the rule. + 1. Select **DNAT** as the action. + 1. Enter the reserved Public IP in the destination match. + 1. Enter the VM Private IP in the translated IP. This IP is from the range of Public IPs reserved from the Azure VMware Solution Portal. + 1. Select **SAVE**. + 1. Optionally, configure the Translated Port or source IP for more specific matches. + +The VM is now exposed to the internet on the specific Public IP and/or specific ports. + +### Gateway Firewall used to filter traffic to VMs at T1 Gateways + +You can provide security protection for your network traffic in and out of the public Internet through your Gateway Firewall. +1. From your Azure VMware Solution Private Cloud, select **VMware credentials** +2. Locate your NSX-T URL and credentials. +3. Log in to **VMware NSX-T**. +4. From the NSX-T home screen, select **Gateway Policies**. +5. Select **Gateway Specific Rules**, choose the T1 Gateway and select **ADD POLICY**. +6. 
Select **New Policy** and enter a policy name. +7. Select the Policy and select **ADD RULE**. +8. Configure the rule. + + 1. Select **New Rule**. + 1. Enter a descriptive name. + 1. Configure the source, destination, services, and action. + +1. Select **Match External Address** to apply firewall rules to the external address of a NAT rule. + +For example, the following rule is set to Match External Address, and this setting will allow SSH traffic inbound to the Public IP. + :::image type="content" source="media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity.png" alt-text="Screenshot of a firewall rule that allows inbound Internet connectivity to a Public IP." lightbox="media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity-expanded.png"::: + +If **Match Internal Address** was specified, the destination would be the internal or private IP address of the VM. +For more information on the NSX-T Gateway Firewall, see the [NSX-T Gateway Firewall Administration Guide](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/administration/GUID-A52E1A6F-F27D-41D9-9493-E3A75EC35481.html). +The Distributed Firewall may also be used to filter traffic to VMs. This feature is outside the scope of this document. For more information, see the [NSX-T Distributed Firewall Administration Guide](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/administration/GUID-6AB240DB-949C-4E95-A9A7-4AC6EF5E3036.html). 
+ + +## Next steps +[Internet connectivity design considerations (Preview)](concepts-design-public-internet-access.md) + +[Enable Managed SNAT for Azure VMware Solution Workloads (Preview)](enable-managed-snat-for-workloads.md) + +[Disable Internet access or enable a default route](disable-internet-access.md) + diff --git a/articles/azure-vmware/index.yml b/articles/azure-vmware/index.yml index 936fdd9344d5..b92fd3628fc3 100644 --- a/articles/azure-vmware/index.yml +++ b/articles/azure-vmware/index.yml @@ -33,6 +33,8 @@ landingContent: url: concepts-api-management.md - text: Hub and spoke url: concepts-hub-and-spoke.md + - text: Internet connectivity design considerations + url: concepts-design-public-internet-access.md - text: Network design considerations url: concepts-network-design-considerations.md - text: Networking and interconnectivity diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png b/articles/azure-vmware/media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png new file mode 100644 index 000000000000..6d7e1136d8fa Binary files /dev/null and b/articles/azure-vmware/media/public-ip-nsx-edge/architecture-internet-access-avs-public-ip.png differ diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity-expanded.png b/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity-expanded.png new file mode 100644 index 000000000000..0ba110c260ee Binary files /dev/null and b/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity-expanded.png differ diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity.png b/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity.png new file mode 100644 index 000000000000..b39248f76bf5 Binary files /dev/null and 
b/articles/azure-vmware/media/public-ip-nsx-edge/gateway-specific-rules-match-external-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-internet-connectivity.png b/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-internet-connectivity.png new file mode 100644 index 000000000000..481ed65ea193 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-subnet-internet-connectivity.png b/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-subnet-internet-connectivity.png new file mode 100644 index 000000000000..e8f0d25335d3 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-nsx-edge/public-ip-subnet-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/add-number-of-ip-addresses-required.png b/articles/azure-vmware/media/public-ip-usage/add-number-of-ip-addresses-required.png index fc0e44054ff8..e86b61d46f9e 100644 Binary files a/articles/azure-vmware/media/public-ip-usage/add-number-of-ip-addresses-required.png and b/articles/azure-vmware/media/public-ip-usage/add-number-of-ip-addresses-required.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/architecture-internet-access-avs-public-ip.png b/articles/azure-vmware/media/public-ip-usage/architecture-internet-access-avs-public-ip.png new file mode 100644 index 000000000000..6d7e1136d8fa Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/architecture-internet-access-avs-public-ip.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/avs-private-cloud-resource.png b/articles/azure-vmware/media/public-ip-usage/avs-private-cloud-resource.png deleted file mode 100644 index 7f2297b167e7..000000000000 Binary files a/articles/azure-vmware/media/public-ip-usage/avs-private-cloud-resource.png and /dev/null differ diff --git 
a/articles/azure-vmware/media/public-ip-usage/private-cloud-internet-connectivity.png b/articles/azure-vmware/media/public-ip-usage/private-cloud-internet-connectivity.png new file mode 100644 index 000000000000..b1e75f9d01a9 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/private-cloud-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/private-cloud-save-snat-internet-connectivity.png b/articles/azure-vmware/media/public-ip-usage/private-cloud-save-snat-internet-connectivity.png new file mode 100644 index 000000000000..ebf97e404885 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/private-cloud-save-snat-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/private-cloud-workload-internet-connectivity.png b/articles/azure-vmware/media/public-ip-usage/private-cloud-workload-internet-connectivity.png new file mode 100644 index 000000000000..214e9b7997e6 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/private-cloud-workload-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/public-ip-architecture-diagram.png b/articles/azure-vmware/media/public-ip-usage/public-ip-architecture-diagram.png index 20d8be67a5f7..6d271395f28e 100644 Binary files a/articles/azure-vmware/media/public-ip-usage/public-ip-architecture-diagram.png and b/articles/azure-vmware/media/public-ip-usage/public-ip-architecture-diagram.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/public-ip-block-internet-connectivity.png b/articles/azure-vmware/media/public-ip-usage/public-ip-block-internet-connectivity.png new file mode 100644 index 000000000000..f08ea165a9c4 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/public-ip-block-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/public-ip-subnet-internet-connectivity.png 
b/articles/azure-vmware/media/public-ip-usage/public-ip-subnet-internet-connectivity.png new file mode 100644 index 000000000000..e8f0d25335d3 Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/public-ip-subnet-internet-connectivity.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png b/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png deleted file mode 100644 index 1ecaf653624d..000000000000 Binary files a/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-polcy.png and /dev/null differ diff --git a/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png b/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png new file mode 100644 index 000000000000..d5075c32cdcc Binary files /dev/null and b/articles/azure-vmware/media/public-ip-usage/secure-hubs-with-azure-firewall-policy.png differ diff --git a/articles/azure-vmware/media/public-ip-usage/virtual-hub-page-public-ip-configuration.png b/articles/azure-vmware/media/public-ip-usage/virtual-hub-page-public-ip-configuration.png index 39bce924a644..dafc16f5ab4a 100644 Binary files a/articles/azure-vmware/media/public-ip-usage/virtual-hub-page-public-ip-configuration.png and b/articles/azure-vmware/media/public-ip-usage/virtual-hub-page-public-ip-configuration.png differ diff --git a/articles/azure-vmware/toc.yml b/articles/azure-vmware/toc.yml index 217dd4c1516f..81ec63c4b6ae 100644 --- a/articles/azure-vmware/toc.yml +++ b/articles/azure-vmware/toc.yml @@ -44,6 +44,8 @@ href: concepts-api-management.md - name: Hub and spoke href: concepts-hub-and-spoke.md + - name: Internet connectivity design considerations + href: concepts-design-public-internet-access.md - name: Network design considerations href: concepts-network-design-considerations.md - name: Networking and interconnectivity @@ -104,6 +106,16 @@ href: 
configure-identity-source-vcenter.md - name: Configure GitHub Enterprise Server href: configure-github-enterprise-server.md + - name: Configure Internet connectivity + items: + - name: Enable Managed SNAT for Azure VMware Solution Workloads + href: enable-managed-snat-for-workloads.md + - name: Enable Public IP to the NSX Edge for Azure VMware Solution + href: enable-public-ip-nsx-edge.md + - name: Disable Internet access or enable a default route + href: disable-internet-access.md + - name: Enable public internet access + href: enable-public-internet-access.md - name: Configure networking items: - name: Configure DHCP server or relay @@ -120,8 +132,6 @@ href: configure-port-mirroring-azure-vmware-solution.md - name: Configure a site-to-site VPN in vWAN href: configure-site-to-site-vpn-gateway.md - - name: Enable public internet access - href: enable-public-internet-access.md - name: HCX Mobility Optimized Networking (MON) guidance href: vmware-hcx-mon-guidance.md - name: Configure storage policies diff --git a/articles/azure-web-pubsub/reference-server-sdk-python.md b/articles/azure-web-pubsub/reference-server-sdk-python.md index 28d784b0aea9..e3b027b84e24 100644 --- a/articles/azure-web-pubsub/reference-server-sdk-python.md +++ b/articles/azure-web-pubsub/reference-server-sdk-python.md @@ -1,18 +1,19 @@ --- title: Reference - Python server SDK for Azure Web PubSub -description: This reference describes the Python server SDK for the Azure Web PubSub service. +description: Learn about the Python server SDK for the Azure Web PubSub service. You can use this library in your app server to manage the WebSocket client connections. 
author: vicancy ms.author: lianwei ms.service: azure-web-pubsub -ms.topic: conceptual -ms.date: 11/08/2021 +ms.topic: how-to +ms.custom: kr2b-contr-experiment +ms.date: 05/23/2022 --- # Azure Web PubSub service client library for Python [Azure Web PubSub Service](./index.yml) is an Azure-managed service that helps developers easily build web applications with real-time features and publish-subscribe pattern. Any scenario that requires real-time publish-subscribe messaging between server and clients or among clients can use Azure Web PubSub service. Traditional real-time features that often require polling from server or submitting HTTP requests can also use Azure Web PubSub service. -You can use this library in your app server side to manage the WebSocket client connections, as shown in below diagram: +You can use this library in your app server side to manage the WebSocket client connections, as shown in following diagram: ![The overflow diagram shows the overflow of using the service client library.](media/sdk-reference/service-client-overflow.png) @@ -21,31 +22,29 @@ Use this library to: - Send messages to hubs and groups. - Send messages to particular users and connections. - Organize users and connections into groups. -- Close connections -- Grant, revoke, and check permissions for an existing connection +- Close connections. +- Grant, revoke, and check permissions for an existing connection. -[Source code](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/webpubsub/azure-messaging-webpubsubservice) | [Package (Pypi)][package] | [API reference documentation](/python/api/overview/azure/messaging-webpubsubservice-readme) | [Product documentation][webpubsubservice_docs] +## Prerequisites -> [!IMPORTANT] -> Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691. +- Python 3.6 or later is required to use this package. 
+- You need an [Azure subscription][azure_sub] and an [Azure WebPubSub service instance][webpubsubservice_docs] to use this package. +- An existing Azure Web PubSub service instance. -## Getting started +> [!IMPORTANT] +> Azure SDK Python packages support for Python 2.7 is ending 01 January 2022. For more information, see [Azure SDK Python packages support](https://github.com/Azure/azure-sdk-for-python/issues/20691). -### Prerequisites +## Install the package -- Python 2.7, or 3.6 or later is required to use this package. -- You need an [Azure subscription][azure_sub] and a [Azure WebPubSub service instance][webpubsubservice_docs] to use this package. -- An existing Azure Web PubSub service instance. - -### 1. Install the package +Use this command to install the package: ```bash python -m pip install azure-messaging-webpubsubservice ``` -### 2. Create and authenticate a WebPubSubServiceClient +## Create and authenticate a WebPubSubServiceClient -You can authenticate the `WebPubSubServiceClient` using [connection string][connection_string]: +You can authenticate the `WebPubSubServiceClient` using a [connection string][connection_string]: ```python >>> from azure.messaging.webpubsubservice import WebPubSubServiceClient @@ -53,7 +52,7 @@ You can authenticate the `WebPubSubServiceClient` using [connection string][conn >>> service = WebPubSubServiceClient.from_connection_string(connection_string='', hub='hub') ``` -Or using the service endpoint and the access key: +Or use the service endpoint and the access key: ```python >>> from azure.messaging.webpubsubservice import WebPubSubServiceClient @@ -62,11 +61,11 @@ Or using the service endpoint and the access key: >>> service = WebPubSubServiceClient(endpoint='', hub='hub', credential=AzureKeyCredential("")) ``` -Or using [Azure Active Directory][aad_doc]: +Or use [Azure Active Directory][aad_doc] (Azure AD): -1. [pip][pip] install [`azure-identity`][azure_identity_pip] -2. 
Follow the document to [enable AAD authentication on your Webpubsub resource][aad_doc] -3. Update code to use [DefaultAzureCredential][default_azure_credential] +1. [pip][pip] install [`azure-identity`][azure_identity_pip]. +2. [Enable Azure AD authentication on your Webpubsub resource][aad_doc]. +3. Update code to use [DefaultAzureCredential][default_azure_credential]. ```python >>> from azure.messaging.webpubsubservice import WebPubSubServiceClient @@ -88,7 +87,7 @@ Or using [Azure Active Directory][aad_doc]: }) ``` -The WebSocket client will receive JSON serialized text: `{"from": "user1", "data": "Hello world"}`. +The WebSocket client receives JSON serialized text: `{"from": "user1", "data": "Hello world"}`. ### Broadcast messages in plain-text format @@ -98,7 +97,7 @@ The WebSocket client will receive JSON serialized text: `{"from": "user1", "data >>> service.send_to_all(message = 'Hello world', content_type='text/plain') ``` -The WebSocket client will receive text: `Hello world`. +The WebSocket client receives text: `Hello world`. ### Broadcast messages in binary format @@ -109,14 +108,12 @@ The WebSocket client will receive text: `Hello world`. >>> service.send_to_all(message=io.StringIO('Hello World'), content_type='application/octet-stream') ``` -The WebSocket client will receive binary text: `b'Hello world'`. - -## Troubleshooting +The WebSocket client receives binary text: `b'Hello world'`. -### Logging +## Logging This SDK uses Python standard logging library. -You can configure logging print out debugging information to the stdout or anywhere you want. +You can configure logging to print debugging information to the `stdout` or anywhere you want. 
```python import sys @@ -139,35 +136,30 @@ credential = DefaultAzureCredential() service = WebPubSubServiceClient(endpoint=endpoint, hub='hub', credential=credential, logging_enable=True) ``` -Similarly, `logging_enable` can enable detailed logging for a single call, -even when it isn't enabled for the WebPubSubServiceClient: +Similarly, `logging_enable` can enable detailed logging for a single call, even when it isn't enabled for the `WebPubSubServiceClient`: ```python result = service.send_to_all(..., logging_enable=True) ``` -Http request and response details are printed to stdout with this logging config. +HTTP request and response details are printed to `stdout` with this logging configuration. ## Next steps -Check [more samples here][samples]. +- [Source code](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/webpubsub/azure-messaging-webpubsubservice) +- [Package (Pypi)][package] +- [API reference documentation](/python/api/overview/azure/messaging-webpubsubservice-readme) +- [Product documentation][webpubsubservice_docs] + +For more samples, see [Azure Web PubSub service client library for Python Samples][samples]. ## Contributing -This project welcomes contributions and suggestions. Most contributions require -you to agree to a Contributor License Agreement (CLA) declaring that you have -the right to, and actually do, grant us the rights to use your contribution. -For details, visit https://cla.microsoft.com. +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For more information, see [Contributor License Agreement](https://cla.microsoft.com). -When you submit a pull request, a CLA-bot will automatically determine whether -you need to provide a CLA and decorate the PR appropriately (e.g., label, -comment). Simply follow the instructions provided by the bot. 
You will only -need to do this once across all repos using our CLA. +When you submit a pull request, a CLA-bot automatically determines whether you need to provide a CLA and decorate the PR appropriately, for example, "label", "comment". Follow the instructions provided by the bot. You only need to do this action once across all repos using our CLA. -This project has adopted the -[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, -see the Code of Conduct FAQ or contact opencode@microsoft.com with any -additional questions or comments. +This project has adopted the Microsoft Open Source Code of Conduct. For more information, see [Code of Conduct][code_of_conduct] FAQ or contact [Open Source Conduct Team](mailto:opencode@microsoft.com) with questions or comments. [webpubsubservice_docs]: ./index.yml diff --git a/articles/backup/backup-azure-sap-hana-database.md b/articles/backup/backup-azure-sap-hana-database.md index 1c15b5bfbeb6..9924cc778ae7 100644 --- a/articles/backup/backup-azure-sap-hana-database.md +++ b/articles/backup/backup-azure-sap-hana-database.md @@ -2,7 +2,7 @@ title: Back up an SAP HANA database to Azure with Azure Backup description: In this article, learn how to back up an SAP HANA database to Azure virtual machines with the Azure Backup service. ms.topic: conceptual -ms.date: 04/28/2022 +ms.date: 06/01/2022 author: v-amallick ms.service: backup ms.author: v-amallick @@ -40,7 +40,7 @@ The following table lists the various alternatives you can use for establishing | Private endpoints | Allow backups over private IPs inside the virtual network

    Provide granular control on the network and vault side | Incurs standard private endpoint [costs](https://azure.microsoft.com/pricing/details/private-link/) | | NSG service tags | Easier to manage as range changes are automatically merged

    No additional costs | Can be used with NSGs only

    Provides access to the entire service | | Azure Firewall FQDN tags | Easier to manage since the required FQDNs are automatically managed | Can be used with Azure Firewall only | -| Allow access to service FQDNs/IPs | No additional costs

    Works with all network security appliances and firewalls | A broad set of IPs or FQDNs may be required to be accessed | +| Allow access to service FQDNs/IPs | No additional costs.

    Works with all network security appliances and firewalls.

    You can also use service endpoints for *Storage* and *Azure Active Directory*. However, for Azure Backup, you need to assign the access to the corresponding IPs/FQDNs. | A broad set of IPs or FQDNs may be required to be accessed. | | [Virtual Network Service Endpoint](../virtual-network/virtual-network-service-endpoints-overview.md) | Can be used for Azure Storage (= Recovery Services vault).

    Provides a large benefit by optimizing the performance of data plane traffic. | Can’t be used for Azure AD, Azure Backup service. | | Network Virtual Appliance | Can be used for Azure Storage, Azure AD, Azure Backup service.

    **Data plane**
    • Azure Storage: `*.blob.core.windows.net`, `*.queue.core.windows.net`, `*.blob.storage.azure.net`


    **Management plane**
    • Azure AD: Allow access to FQDNs mentioned in sections 56 and 59 of [Microsoft 365 Common and Office Online](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide&preserve-view=true#microsoft-365-common-and-office-online).
    • Azure Backup service: `.backup.windowsazure.com`

    Learn more about [Azure Firewall service tags](../firewall/fqdn-tags.md). | Adds overhead to data plane traffic and decrease throughput/performance. | diff --git a/articles/backup/backup-azure-vm-backup-faq.yml b/articles/backup/backup-azure-vm-backup-faq.yml index a1dcc8749de8..1cd02a7395b2 100644 --- a/articles/backup/backup-azure-vm-backup-faq.yml +++ b/articles/backup/backup-azure-vm-backup-faq.yml @@ -4,7 +4,7 @@ metadata: description: In this article, discover answers to common questions about backing up Azure VMs with the Azure Backup service. ms.topic: faq ms.service: backup - ms.date: 05/06/2022 + ms.date: 05/27/2022 author: v-amallick ms.author: v-amallick @@ -26,7 +26,7 @@ sections: To improve backup performance see, [backup best practices](./backup-azure-vms-introduction.md#best-practices); [Backup considerations](./backup-azure-vms-introduction.md#backup-and-restore-considerations) and [Backup Performance](./backup-azure-vms-introduction.md#backup-performance) - Although the total backup time for incremental backups is less than 24 hours, that might not be the case for the first backup. + Although the total backup time for incremental backups is less than 24 hours that might not be the case for the first backup. - question: Is the backup cost included in the VM cost? answer: | @@ -125,7 +125,7 @@ sections: - question: Does Azure Backup interfere with application performance? answer: | - Creating a VM Snapshot takes few minutes, and there will be a very minimal interference on application performance at this stage. But, data transfer to a vault takes a couple of hours; so we recommend to schedule backups during off business hours. Learn more about [best practices for backup and restore](./backup-azure-vms-introduction.md#backup-and-restore-considerations). + Creating a VM Snapshot takes few minutes, and there will be a very minimal interference on application performance at this stage. 
But, data transfer to a vault takes a couple of hours; so we recommend scheduling backups during off business hours. Learn more about [best practices for backup and restore](./backup-azure-vms-introduction.md#backup-and-restore-considerations). - question: Will a new disk added to VM be backed up automatically? answer: | @@ -140,6 +140,10 @@ sections: answer: | Yes, you can do this when *Transfer data to vault* phase is in progress. + - question: Does Azure Backup take backup of keys for ADE encrypted VMs and restore it along with the restored disk? + answer: | + Azure Backup backs up encryption keys and secrets of the backup data. Generally, the keys are not restored in the Key vault, but Azure Backup allows restoring the keys during the loss of keys. + - name: Restore questions: - question: How do I decide whether to restore disks only or a full VM? @@ -217,6 +221,10 @@ sections: answer: | Yes, you can delete these files once the restoration process is complete. By default, Azure Backup retains these files for future use. + - question: How do I run restore operation for Cross Region Restore (CRR) of ADE encrypted VMs? + answer: | + The encrypted keys are not expected to be present in the target region as part of Cross Regions Restore (CRR). Therefore, you need to restore the encrypted keys and secrets using the restored file. When the restore is complete, you can create Azure encrypted VM using restored disks. + - name: Manage VM backups questions: - question: What happens if I modify a backup policy? @@ -262,7 +270,7 @@ sections: One way to view the retention settings for your backups, is to navigate to the backup item [dashboard](./backup-azure-manage-vms.md#view-vms-on-the-dashboard) for your VM, in the Azure portal. Selecting the link to its backup policy helps you view the retention duration of all the daily, weekly, monthly and yearly retention points associated with the VM. 
- You can also use [Backup Explorer](./monitor-azure-backup-with-backup-explorer.md) to view the retention settings for all your VMs within a single pane of glass. Navigate to Backup Explorer from any Recovery Services vault, go to the **Backup Items** tab and select the Advanced View to see detailed retention information for each VM. + You can also use [Backup Explorer](./monitor-azure-backup-with-backup-explorer.md) to view the retention settings for all your VMs within a single pane of glass. Go to Backup Explorer from any Recovery Services vault, go to the **Backup Items** tab and select the Advanced View to see detailed retention information for each VM. - question: When the snapshot is moved from a storage account to a vault, how is encryption in the transit managed? answer: Azure VM Backup uses [HTTPS communication for encryption in transit](guidance-best-practices.md#encryption-of-data-in-transit-and-at-rest). The data transfer uses Azure fabric (and not public endpoints), which do not need Internet access for VM backup. @@ -305,6 +313,10 @@ sections: answer: | This error appears when you try to start a VM after creating an Azure VM from a non-Marketplace image or swap the OS disk of a VM with a non-Marketplace image, and then the VM deployment fails. To resolve this issue, remove the plan information from the VM. + - question: How do I manage key rotations? How to ensure which key is used during backup and if it’s present to be used with the restored VM? + answer: | + Azure Backup backs up the secrets and KEK data of the key version during backup, and restores the same. However, booting ADE VMs with older version keys is also possible. 
+ diff --git a/articles/backup/backup-sql-server-database-azure-vms.md b/articles/backup/backup-sql-server-database-azure-vms.md index b9444bf11804..c0a4d6e5a1c9 100644 --- a/articles/backup/backup-sql-server-database-azure-vms.md +++ b/articles/backup/backup-sql-server-database-azure-vms.md @@ -2,7 +2,7 @@ title: Back up multiple SQL Server VMs from the vault description: In this article, learn how to back up SQL Server databases on Azure virtual machines with Azure Backup from the Recovery Services vault ms.topic: conceptual -ms.date: 04/28/2022 +ms.date: 06/01/2022 author: v-amallick ms.service: backup ms.author: v-amallick @@ -48,7 +48,7 @@ The following table lists the various alternatives you can use for establishing | Private endpoints | Allow backups over private IPs inside the virtual network

    Provide granular control on the network and vault side | Incurs standard private endpoint [costs](https://azure.microsoft.com/pricing/details/private-link/) | | NSG service tags | Easier to manage as range changes are automatically merged

    No additional costs | Can be used with NSGs only

    Provides access to the entire service | | Azure Firewall FQDN tags | Easier to manage since the required FQDNs are automatically managed | Can be used with Azure Firewall only | -| Allow access to service FQDNs/IPs | No additional costs

    Works with all network security appliances and firewalls | A broad set of IPs or FQDNs may be required to be accessed | +| Allow access to service FQDNs/IPs | No additional costs.

    Works with all network security appliances and firewalls.

    You can also use service endpoints for *Storage* and *Azure Active Directory*. However, for Azure Backup, you need to assign the access to the corresponding IPs/FQDNs. | A broad set of IPs or FQDNs may be required to be accessed. | | Use an HTTP proxy | Single point of internet access to VMs | Additional costs to run a VM with the proxy software | The following sections provide more details around using these options. diff --git a/articles/bastion/connect-ip-address.md b/articles/bastion/connect-ip-address.md index 7642b5e3d39d..5048eea46624 100644 --- a/articles/bastion/connect-ip-address.md +++ b/articles/bastion/connect-ip-address.md @@ -53,7 +53,7 @@ Before you begin these steps, verify that you have the following environment set 1. To connect to a VM using a specified private IP address, you make the connection from Bastion to the VM, not directly from the VM page. On your Bastion page, select **Connect** to open the Connect page. -1. On the Bastion **Connect** page, for **Hostname**, enter the private IP address of the target VM. +1. On the Bastion **Connect** page, for **IP address**, enter the private IP address of the target VM. :::image type="content" source="./media/connect-ip-address/ip-address.png" alt-text="Screenshot of the Connect using Azure Bastion page." lightbox="./media/connect-ip-address/ip-address.png"::: diff --git a/articles/batch/scripts/batch-cli-sample-add-application.md b/articles/batch/scripts/batch-cli-sample-add-application.md index 3782ccc17acd..37699a727ad0 100644 --- a/articles/batch/scripts/batch-cli-sample-add-application.md +++ b/articles/batch/scripts/batch-cli-sample-add-application.md @@ -2,33 +2,61 @@ title: Azure CLI Script Example - Add an Application in Batch | Microsoft Docs description: Learn how to add an application for use with an Azure Batch pool or a task using the Azure CLI. 
ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: batch, azure cli samples, azure cli code samples, azure cli script samples --- # CLI example: Add an application to an Azure Batch account -This script demonstrates how to add an application for use with an Azure Batch pool or task. To set up an application to add to your Batch account, package your executable, together with any dependencies, into a zip file. +This script demonstrates how to add an application for use with an Azure Batch pool or task. To set up an application to add to your Batch account, package your executable, together with any dependencies, into a zip file. + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - - This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] + +### Create batch account and new application + +:::code language="azurecli" source="~/azure_cli_scripts/batch/add-application/add-application.sh" id="FullScript"::: + +### Create batch application package -## Example script +An application can reference multiple application executable packages of different versions. The executables and any dependencies need to be zipped up for the package. Once uploaded, the CLI attempts to activate the package so that it's ready for use. 
-[!code-azurecli-interactive[main](../../../cli_scripts/batch/add-application/add-application.sh "Add Application")] +```azurecli +az batch application package create \ + --resource-group $resourceGroup \ + --name $batchAccount \ + --application-name "MyApplication" \ + --package-file my-application-exe.zip \ + --version-name 1.0 +``` + +### Update the application + +Update the application to assign the newly added application package as the default version. + +```azurecli +az batch application set \ + --resource-group $resourceGroup \ + --name $batchAccount \ + --application-name "MyApplication" \ + --default-version 1.0 +``` -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. diff --git a/articles/batch/scripts/batch-cli-sample-create-account.md b/articles/batch/scripts/batch-cli-sample-create-account.md index 7f8139f10aa9..135b192d2e54 100644 --- a/articles/batch/scripts/batch-cli-sample-create-account.md +++ b/articles/batch/scripts/batch-cli-sample-create-account.md @@ -1,8 +1,8 @@ --- title: Azure CLI Script Example - Create Batch account - Batch service | Microsoft Docs -description: Learn how to create a Batch account in Batch service mode with this Azure CLI script example. This also script shows how to query or update various properties of the account. +description: Learn how to create a Batch account in Batch service mode with this Azure CLI script example. This script also shows how to query or update various properties of the account. 
ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: batch, azure cli samples, azure cli code samples, azure cli script samples --- @@ -10,27 +10,29 @@ keywords: batch, azure cli samples, azure cli code samples, azure cli script sam # CLI example: Create a Batch account in Batch service mode This script creates an Azure Batch account in Batch service mode and shows how to query or update various properties of the account. When you create a Batch account in the default Batch service mode, its compute nodes are assigned internally by the Batch -service. Allocated compute nodes are subject to a separate vCPU (core) quota and the account can be -authenticated either via shared key credentials or an Azure Active Directory token. +service. Allocated compute nodes are subject to a separate vCPU (core) quota and the account can be authenticated either via shared key credentials or an Azure Active Directory token. + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] -## Example script +### Run the script -[!code-azurecli-interactive[main](../../../cli_scripts/batch/create-account/create-account.sh "Create Account")] +:::code language="azurecli" source="~/azure_cli_scripts/batch/create-account/create-account.sh" id="FullScript"::: -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. 
+[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. diff --git a/articles/batch/scripts/batch-cli-sample-create-user-subscription-account.md b/articles/batch/scripts/batch-cli-sample-create-user-subscription-account.md index ccfa51a25e88..f46fd5510342 100644 --- a/articles/batch/scripts/batch-cli-sample-create-user-subscription-account.md +++ b/articles/batch/scripts/batch-cli-sample-create-user-subscription-account.md @@ -2,7 +2,7 @@ title: Azure CLI Script Example - Create Batch account - user subscription | Microsoft Docs description: Learn how to create an Azure Batch account in user subscription mode. This account allocates compute nodes into your subscription. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: batch, azure cli samples, azure cli examples, azure cli code samples --- @@ -11,23 +11,27 @@ keywords: batch, azure cli samples, azure cli examples, azure cli code samples This script creates an Azure Batch account in user subscription mode. An account that allocates compute nodes into your subscription must be authenticated via an Azure Active Directory token. The compute nodes allocated count toward your subscription's vCPU (core) quota. +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] + [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. 
+## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] -## Example script +### Run the script -[!code-azurecli-interactive[main](../../../cli_scripts/batch/create-account/create-account-user-subscription.sh "Create Account using user subscription")] +:::code language="azurecli" source="~/azure_cli_scripts/batch/create-account/create-account-user-subscription.sh" id="FullScript"::: -## Clean up deployment +## Clean up resources -Run the following command to remove the resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. diff --git a/articles/batch/scripts/batch-cli-sample-manage-linux-pool.md b/articles/batch/scripts/batch-cli-sample-manage-linux-pool.md index 61050c82a197..8c5d4dffa087 100644 --- a/articles/batch/scripts/batch-cli-sample-manage-linux-pool.md +++ b/articles/batch/scripts/batch-cli-sample-manage-linux-pool.md @@ -2,7 +2,7 @@ title: Azure CLI Script Example - Linux Pool in Batch | Microsoft Docs description: Learn the commands available in the Azure CLI to create and manage a pool of Linux compute nodes in Azure Batch. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: linux, azure cli samples, azure cli code samples, azure cli script samples --- @@ -11,24 +11,48 @@ keywords: linux, azure cli samples, azure cli code samples, azure cli script sam This script demonstrates some of the commands available in the Azure CLI to create and manage a pool of Linux compute nodes in Azure Batch. 
+[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] + [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] + +### To create a Linux pool in Azure Batch + +:::code language="azurecli" source="~/azure_cli_scripts/batch/manage-pool/manage-pool-linux.sh" id="FullScript"::: + +### To reboot a batch node -## Example script +If a particular node in the pool is having issues, it can be rebooted or reimaged. The ID of the node can be retrieved with the list command above. A typical node ID is in the format `tvm-xxxxxxxxxx_1-`. -[!code-azurecli-interactive[main](../../../cli_scripts/batch/manage-pool/manage-pool-linux.sh "Manage Linux Virtual Machine Pool")] +```azurecli +az batch node reboot \ + --pool-id mypool-linux \ + --node-id tvm-123_1-20170316t000000z +``` + +### To delete a batch node + +One or more compute nodes can be deleted from the pool, and any work already assigned to it can be re-allocated to another node. + +```azurecli +az batch node delete \ + --pool-id mypool-linux \ + --node-list tvm-123_1-20170316t000000z tvm-123_2-20170316t000000z \ + --node-deallocation-option requeue +``` -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. 
diff --git a/articles/batch/scripts/batch-cli-sample-manage-windows-pool.md b/articles/batch/scripts/batch-cli-sample-manage-windows-pool.md index 056dd5502980..5ec90314f674 100644 --- a/articles/batch/scripts/batch-cli-sample-manage-windows-pool.md +++ b/articles/batch/scripts/batch-cli-sample-manage-windows-pool.md @@ -2,7 +2,7 @@ title: Azure CLI Script Example - Windows Pool in Batch | Microsoft Docs description: Learn some of the commands available in the Azure CLI to create and manage a pool of Windows compute nodes in Azure Batch. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: windows pool, azure cli samples, azure cli code samples, azure cli script samples --- @@ -10,27 +10,29 @@ keywords: windows pool, azure cli samples, azure cli code samples, azure cli scr # CLI example: Create and manage a Windows pool in Azure Batch This script demonstrates some of the commands available in the Azure CLI to create and -manage a pool of Windows compute nodes in Azure Batch. A Windows pool can be configured in two ways, with either a Cloud Services configuration -or a Virtual Machine configuration. This example shows how to create a Windows pool with the Cloud Services configuration. +manage a pool of Windows compute nodes in Azure Batch. A Windows pool can be configured in two ways, with either a Cloud Services configuration or a Virtual Machine configuration. This example shows how to create a Windows pool with the Cloud Services configuration. + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. 
+## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] -## Example script +### Run the script -[!code-azurecli-interactive[main](../../../cli_scripts/batch/manage-pool/manage-pool-windows.sh "Manage Windows Cloud Services Pool")] +:::code language="azurecli" source="~/azure_cli_scripts/batch/manage-pool/manage-pool-windows.sh" id="FullScript"::: -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. +[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. diff --git a/articles/batch/scripts/batch-cli-sample-run-job.md b/articles/batch/scripts/batch-cli-sample-run-job.md index 8f23e47135dd..1d1c4d7ce585 100644 --- a/articles/batch/scripts/batch-cli-sample-run-job.md +++ b/articles/batch/scripts/batch-cli-sample-run-job.md @@ -2,34 +2,70 @@ title: Azure CLI Script Example - Run a Batch job | Microsoft Docs description: Learn how to create a Batch job and add a series of tasks to the job using the Azure CLI. This article also shows how to monitor a job and its tasks. ms.topic: sample -ms.date: 09/17/2021 +ms.date: 05/24/2022 ms.custom: devx-track-azurecli, seo-azure-cli keywords: batch, batch job, monitor job, azure cli samples, azure cli code samples, azure cli script samples --- # CLI example: Run a job and tasks with Azure Batch -This script creates a Batch job and adds a series of tasks to the job. It also demonstrates -how to monitor a job and its tasks. +This script creates a Batch job and adds a series of tasks to the job. It also demonstrates how to monitor a job and its tasks. 
+ +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] -- This tutorial requires version 2.0.20 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +## Sample script + +[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] + +### Create a Batch account in Batch service mode + +:::code language="azurecli" source="~/azure_cli_scripts/batch/run-job/run-job.sh" id="FullScript"::: + +### To add many tasks at once + +To add many tasks at once, specify the tasks in a JSON file, and pass it to the command. For format, see https://github.com/Azure/azure-docs-cli-python-samples/blob/master/batch/run-job/tasks.json. Provide the absolute path to the JSON file. For an example JSON file, see https://github.com/Azure-Samples/azure-cli-samples/blob/master/batch/run-job/tasks.json. + +```azurecli +az batch task create \ + --job-id myjob \ + --json-file tasks.json +``` + +### To update the job + +Update the job so that it is automatically marked as completed once all the tasks are finished. + +```azurecli +az batch job set \ +--job-id myjob \ +--on-all-tasks-complete terminatejob +``` + +### To monitor the status of the job + +```azurecli +az batch job show --job-id myjob +``` -## Example script +### To monitor the status of a task -[!code-azurecli-interactive[main](../../../cli_scripts/batch/run-job/run-job.sh "Run Job")] +```azurecli +az batch task show \ + --job-id myjob \ + --task-id task1 +``` -## Clean up deployment +## Clean up resources -Run the following command to remove the -resource group and all resources associated with it. 
+[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] -```azurecli-interactive -az group delete --name myResourceGroup +```azurecli +az group delete --name $resourceGroup ``` -## Script explanation +## Sample reference This script uses the following commands. Each command in the table links to command-specific documentation. diff --git a/articles/chaos-studio/chaos-studio-overview.md b/articles/chaos-studio/chaos-studio-overview.md index a38373662259..8e7384322c3b 100644 --- a/articles/chaos-studio/chaos-studio-overview.md +++ b/articles/chaos-studio/chaos-studio-overview.md @@ -1,54 +1,71 @@ --- -title: What is Azure Chaos Studio? -description: Understand Azure Chaos Studio, an Azure service that helps you to measure, understand, and build application and service resilience to real world incidents using chaos engineering to inject faults against your service then monitor how the service responds to disruptions. +title: What is Azure Chaos Studio (Preview)? +description: Measure, understand, and build resilience to incidents by using chaos engineering to inject faults and monitor how your application responds. services: chaos-studio author: johnkemnetz ms.topic: overview -ms.date: 11/11/2021 +ms.date: 05/27/2022 ms.author: johnkem ms.service: chaos-studio -ms.custom: template-overview,ignite-fall-2021 +ms.custom: template-overview,ignite-fall-2021, kr2b-contr-experiment --- -# What is Azure Chaos Studio Preview? +# What is Azure Chaos Studio (Preview)? -Azure Chaos Studio is a managed service for improving resilience by injecting faults into your Azure applications. Running controlled fault injection experiments against your applications, a practice known as chaos engineering, helps you to measure, understand, and improve resilience against real-world incidents, such as a region outages or application failures causing high CPU utilization on a VM. 
+[Azure Chaos Studio](https://azure.microsoft.com/services/chaos-studio) is a managed service that uses chaos engineering to help you measure, understand, and improve your cloud application and service resilience. Chaos engineering is a methodology by which you inject real-world faults into your application to run controlled fault injection experiments. + +Resilience is the capability of a system to handle and recover from disruptions. Application disruptions can cause errors and failures that can adversely affect your business or mission. Whether you're developing, migrating, or operating Azure applications, it's important to validate and improve your application's resilience. + +Chaos Studio helps you avoid negative consequences by validating that your application responds effectively to disruptions and failures. You can use Chaos Studio to test resilience against real-world incidents, like outages or high CPU utilization on virtual machines (VMs). + +The following video provides more background about Azure Chaos Studio: > [!VIDEO https://aka.ms/docs/player?id=29017ee4-bdfa-491e-acfe-8876e93c505b] -## Why should I use Chaos Studio? +## Chaos Studio scenarios + +You can use chaos engineering for various resilience validation scenarios that span the service development and operations lifecycle. There are two types of scenarios: + +- *Shift right* scenarios use a production or pre-production environment. Usually, you do shift right scenarios with real customer traffic or simulated load. +- *Shift left* scenarios can use a development or shared test environment. You can do shift left scenarios without any real customer traffic. -Whether you are developing a new application that will be hosted on Azure, migrating an existing application to Azure, or operating an application that already runs on Azure, it is important to validate and improve your application's resilience. Resilience is the capability of a system to handle and recover from disruptions. 
Disruptions in your application's availability can result in errors and failures for users, which in turn can have negative consequences on your business or mission. +You can use Chaos Studio for the following common chaos engineering scenarios: -When running an application in the cloud, avoiding these negative consequences requires you to validate that your application responds effectively to disruptions that could be caused by a service you depend on, disruptions caused by a failure in the service itself, or even disruptions to incident response tooling and processes. Chaos experimentation enables you to test that your cloud-hosted application is resilient to failures. +- Reproduce an incident that affected your application, to better understand the failure. Ensure that post-incident repairs prevent the incident from recurring. +- Prepare for a major event or season with "game day" load, scale, performance, and resilience validation. +- Do business continuity and disaster recovery (BCDR) drills to ensure that your application can recover quickly and preserve critical data in a disaster. +- Run high availability (HA) drills to test application resilience against region outages, network configuration errors, high stress events, or noisy neighbor issues. +- Develop application performance benchmarks. +- Plan capacity needs for production environments. +- Run stress tests or load tests. +- Ensure that services migrated from an on-premises or other cloud environment remain resilient to known failures. +- Build confidence in services built on cloud-native architectures. +- Validate that live site tooling, observability data, and on-call processes still work in unexpected conditions. -## When would I use Chaos Studio? +For many of these scenarios, you first build resilience using ad-hoc chaos experiments. 
Then, you continuously validate that new deployments won't regress resilience, by running chaos experiments as deployment gates in your continuous integration/continuous deployment (CI/CD) pipelines. -Chaos engineering can be used for a wide variety of resilience validation scenarios. These scenarios span the entire service development and operation lifecycle and can be categorized as either *shift right,* wherein the scenario is best validated in a production or pre-production environment, or *shift left,* wherein the scenario could be validated in a development environment or shared test environment. Typically shift right scenarios should be done with real customer traffic or simulated load whereas shift left scenarios can be done without any real customer traffic. Some common scenarios where chaos engineering can be applied are: -* Reproducing an incident that impacted your application to better understand the failure mode or ensure that post-incident repair items will prevent the incident from recurring. -* Running "game days" - load, scale, performance, and resilience validation of a service in preparation for a major user event or season. -* Performing business continuity / disaster recovery (BCDR) drills to ensure that if your application were impacted by a major disaster it could recover quickly and critical data is preserved. -* Running high availability drills to test application resilience against specific failures such as region outages, network configuration errors, high stress events, or noisy neighbor issues. -* Developing application performance benchmarks. -* Planning capacity needs for production environments. -* Running stress tests or load tests. -* Ensuring services migrated from an on-premises or other cloud environment remain resilient to known failures. -* Building confidence in services built on cloud-native architectures. 
-* Validating that live site tooling, observability data, and on-call processes work as expected under unexpected conditions. +## How Chaos Studio works -For many of these scenarios, you first build resilience using ad-hoc chaos experiments then continuously validate that new deployments won't regress resilience using chaos experiments as a deployment gate in your CI/CD pipeline. +With Chaos Studio, you can orchestrate safe, controlled fault injection on your Azure resources. Chaos experiments are the core of Chaos Studio. A chaos experiment describes the faults to run and the resources to run against. You can organize faults to run in parallel or sequence, depending on your needs. -## How does Chaos Studio work? +Chaos Studio supports two types of faults: -Chaos Studio enables you to orchestrate fault injection on your Azure resources in a safe and controlled way. At the core of Chaos Studio is chaos experiment. A chaos experiment is an Azure resource that describes the faults that should be run and the resources those faults should be run against. Faults can be organized to run in parallel or sequentially, depending on your needs. Chaos Studio supports two types of faults - *service-direct* faults, which run directly against an Azure resource without any installation or instrumentation (for example, rebooting an Azure Cache for Redis cluster or adding network latency to AKS pods), and *agent-based* faults, which run in virtual machines or virtual machine scale sets to perform in-guest failures (for example, applying virtual memory pressure or killing a process). Each fault has specific parameters you can control, like which process to kill or how much memory pressure to generate. +- *Service-direct* faults run directly against an Azure resource, without any installation or instrumentation. Examples include rebooting an Azure Cache for Redis cluster, or adding network latency to Azure Kubernetes Service (AKS) pods. 
+- *Agent-based* faults run in VMs or virtual machine scale sets to do in-guest failures. Examples include applying virtual memory pressure or killing a process. -When you build a chaos experiment, you define one or more *steps* that execute sequentially, each step containing one or more *branches* that run in parallel within the step, and each branch containing one or more *actions* such as injecting a fault or waiting for a certain duration. Finally, you organize the resources (*targets*) that each fault will be run against into groups called selectors so that you can easily reference a group of resources in each action. +Each fault has specific parameters you can configure, like which process to kill or how much memory pressure to generate. + +When you build a chaos experiment, you define one or more *steps* that execute sequentially. Each step contains one or more *branches* that run in parallel within the step. Each branch contains one or more *actions*, such as injecting a fault or waiting for a certain duration. + +You organize resource *targets* to run faults against into groups called *selectors*, so you can easily reference a group of resources in each action. + +The following diagram shows the layout of a chaos experiment in Chaos Studio: ![Diagram showing the layout of a chaos experiment.](images/chaos-experiment.png) -A chaos experiment is an Azure resource that lives in a subscription and resource group. You can use the Azure portal or the Chaos Studio REST API to create, update, start, cancel, and view the status of an experiment. +A chaos experiment is an Azure resource in a subscription and resource group. You can use the Azure portal or the [Chaos Studio REST API](/rest/api/chaosstudio) to create, update, start, cancel, and view the status of experiments. ## Next steps -Get started creating and running chaos experiments to improve application resilience with Chaos Studio using the links below. 
+ - [Create and run your first experiment](chaos-studio-tutorial-service-direct-portal.md) - [Learn more about chaos engineering](chaos-studio-chaos-engineering-overview.md) diff --git a/articles/cloud-services-extended-support/cloud-services-model-and-package.md b/articles/cloud-services-extended-support/cloud-services-model-and-package.md index df9111e958b9..5d66f53df9b8 100644 --- a/articles/cloud-services-extended-support/cloud-services-model-and-package.md +++ b/articles/cloud-services-extended-support/cloud-services-model-and-package.md @@ -20,11 +20,6 @@ Once the cloud service is running in Azure, you can reconfigure it through the * * I want to know more about the [ServiceDefinition.csdef](#csdef) and [ServiceConfig.cscfg](#cscfg) files. * I already know about that, give me [some examples](#next-steps) on what I can configure. * I want to create the [ServicePackage.cspkg](#cspkg). -* I am using Visual Studio and I want to... - * [Create a cloud service][vs_create] - * [Reconfigure an existing cloud service][vs_reconfigure] - * [Deploy a Cloud Service project][vs_deploy] - * [Remote desktop into a cloud service instance][remotedesktop] diff --git a/articles/cognitive-services/Translator/document-translation/create-sas-tokens.md b/articles/cognitive-services/Translator/document-translation/create-sas-tokens.md index 72673ed42d0a..62da138a2b0e 100644 --- a/articles/cognitive-services/Translator/document-translation/create-sas-tokens.md +++ b/articles/cognitive-services/Translator/document-translation/create-sas-tokens.md @@ -1,27 +1,97 @@ --- -title: Create shared access signature (SAS) tokens for containers and blobs with Microsoft Storage Explorer +title: Create shared access signature (SAS) tokens for containers and blobs with Microsoft Storage Explorer description: How to create Shared Access Signature tokens (SAS) for containers and blobs with Microsoft Storage Explorer and the Azure portal. 
ms.topic: how-to manager: nitinme ms.author: lajanuar author: laujan -ms.date: 04/26/2022 +ms.date: 05/27/2022 --- # Create SAS tokens for your storage containers -In this article, you'll learn how to create shared access signature (SAS) tokens using the Azure Storage Explorer or the Azure portal. A SAS token provides secure, delegated access to resources in your Azure storage account. +In this article, you'll learn how to create user delegation, shared access signature (SAS) tokens, using the Azure portal or Azure Storage Explorer. User delegation SAS tokens are secured with Azure AD credentials. SAS tokens provide secure, delegated access to resources in your Azure storage account. -## Create your SAS tokens with Azure Storage Explorer +At a high level, here's how SAS tokens work: -### Prerequisites +* Your application submits the SAS token to Azure Storage as part of a REST API request. -* You'll need a [**Azure Storage Explorer**](../../../vs-azure-tools-storage-manage-with-storage-explorer.md) app installed in your Windows, macOS, or Linux development environment. Azure Storage Explorer is a free tool that enables you to easily manage your Azure cloud storage resources. -* After the Azure Storage Explorer app is installed, [connect it the storage account](../../../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#connect-to-a-storage-account-or-service) you're using for Document Translation. +* If the storage service verifies that the SAS is valid, the request is authorized. -### Create your tokens +* If the SAS token is deemed invalid, the request is declined and the error code 403 (Forbidden) is returned. -### [SAS tokens for containers](#tab/Containers) +Azure Blob Storage offers three resource types: + +* **Storage** accounts provide a unique namespace in Azure for your data. +* **Data storage containers** are located in storage accounts and organize sets of blobs (files, text, or images). 
+* **Blobs** are located in containers and store text and binary data such as files, text, and images. + +> [!IMPORTANT] +> +> * SAS tokens are used to grant permissions to storage resources, and should be protected in the same manner as an account key. +> +> * Operations that use SAS tokens should be performed only over an HTTPS connection, and SAS URIs should only be distributed on a secure connection such as HTTPS. + +## Prerequisites + +To get started, you'll need the following resources: + +* An active [Azure account](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [create a free account](https://azure.microsoft.com/free/). + +* A [Translator](https://ms.portal.azure.com/#create/Microsoft.CognitiveServicesTextTranslation) resource. + +* A **standard performance** [Azure Blob Storage account](https://portal.azure.com/#create/Microsoft.StorageAccount-ARM). You'll create containers to store and organize your files within your storage account. If you don't know how to create an Azure storage account with a storage container, follow these quickstarts: + + * [Create a storage account](../../../storage/common/storage-account-create.md). When you create your storage account, select **Standard** performance in the **Instance details** > **Performance** field. + * [Create a container](../../../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container). When you create your container, set **Public access level** to **Container** (anonymous read access for containers and files) in the **New Container** window. 
+ +## Create SAS tokens in the Azure portal + + + +Go to the [Azure portal](https://portal.azure.com/#home) and navigate to your container or a specific file as follows and continue with the steps below: + +| Create SAS token for a container| Create SAS token for a specific file| +|:-----:|:-----:| +**Your storage account** → **containers** → **your container** |**Your storage account** → **containers** → **your container**→ **your file** | + +1. Right-click the container or file and select **Generate SAS** from the drop-down menu. + +1. Select **Signing method** → **User delegation key**. + +1. Define **Permissions** by checking and/or clearing the appropriate check box: + + * Your **source** container or file must have designated **read** and **list** access. + + * Your **target** container or file must have designated **write** and **list** access. + +1. Specify the signed key **Start** and **Expiry** times. + + * When you create a shared access signature (SAS), the default duration is 48 hours. After 48 hours, you'll need to create a new token. + * Consider setting a longer duration period for the time you'll be using your storage account for Translator Service operations. + * The value for the expiry time is a maximum of seven days from the creation of the SAS token. + +1. The **Allowed IP addresses** field is optional and specifies an IP address or a range of IP addresses from which to accept requests. If the request IP address doesn't match the IP address or address range specified on the SAS token, it won't be authorized. + +1. The **Allowed protocols** field is optional and specifies the protocol permitted for a request made with the SAS. The default value is HTTPS. + +1. Review then select **Generate SAS token and URL**. + +1. The **Blob SAS token** query string and **Blob SAS URL** will be displayed in the lower area of window. + +1. **Copy and paste the Blob SAS token and URL values in a secure location. 
They'll only be displayed once and cannot be retrieved once the window is closed.** + +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. + +## Create SAS tokens with Azure Storage Explorer + +Azure Storage Explorer is a free standalone app that enables you to easily manage your Azure cloud storage resources from your desktop. + +* You'll need the [**Azure Storage Explorer**](../../../vs-azure-tools-storage-manage-with-storage-explorer.md) app installed in your Windows, macOS, or Linux development environment. + +* After the Azure Storage Explorer app is installed, [connect it to the storage account](../../../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#connect-to-a-storage-account-or-service) you're using for Document Translation. Follow the steps below to create tokens for a storage container or specific blob file: + +### [SAS tokens for storage containers](#tab/Containers) 1. Open the Azure Storage Explorer app on your local machine and navigate to your connected **Storage Accounts**. 1. Expand the Storage Accounts node and select **Blob Containers**. @@ -34,74 +104,72 @@ In this article, you'll learn how to create shared access signature (SAS) tokens * Define your container **Permissions** by checking and/or clearing the appropriate check box. * Review and select **Create**. -1. A new window will appear with the **Container** name, **URI**, and **Query string** for your container. +1. A new window will appear with the **Container** name, **URI**, and **Query string** for your container. 1. **Copy and paste the container, URI, and query string values in a secure location. They'll only be displayed once and can't be retrieved once the window is closed.** -1. To construct a SAS URL, append the SAS token (URI) to the URL for a storage service. +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. 
-### [SAS tokens for blobs](#tab/blobs) +### [SAS tokens for specific blob file](#tab/blobs) 1. Open the Azure Storage Explorer app on your local machine and navigate to your connected **Storage Accounts**. 1. Expand your storage node and select **Blob Containers**. 1. Expand the Blob Containers node and select a **container** node to display the contents in the main window. -1. Select the blob where you wish to delegate SAS access and right-click to display the options menu. +1. Select the file where you wish to delegate SAS access and right-click to display the options menu. 1. Select **Get Shared Access Signature...** from options menu. 1. In the **Shared Access Signature** window, make the following selections: * Select your **Access policy** (the default is none). * Specify the signed key **Start** and **Expiry** date and time. A short lifespan is recommended because, once generated, a SAS can't be revoked. * Select the **Time zone** for the Start and Expiry date and time (default is Local). * Define your container **Permissions** by checking and/or clearing the appropriate check box. + * Your **source** container or file must have designated **read** and **list** access. + * Your **target** container or file must have designated **write** and **list** access. + * Select **key1** or **key2**. * Review and select **Create**. -1. A new window will appear with the **Blob** name, **URI**, and **Query string** for your blob. + +1. A new window will appear with the **Blob** name, **URI**, and **Query string** for your blob. 1. **Copy and paste the blob, URI, and query string values in a secure location. They will only be displayed once and cannot be retrieved once the window is closed.** -1. To construct a SAS URL, append the SAS token (URI) to the URL for a storage service. +1. To [construct a SAS URL](#use-your-sas-url-to-grant-access), append the SAS token (URI) to the URL for a storage service. 
--- -## Create SAS tokens for blobs in the Azure portal +### Use your SAS URL to grant access - -### Prerequisites +The SAS URL includes a special set of [query parameters](/rest/api/storageservices/create-user-delegation-sas#assign-permissions-with-rbac). Those parameters indicate how the resources may be accessed by the client. -To get started, you'll need: +You can include your SAS URL with REST API requests in two ways: -* An active [**Azure account**](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [**create a free account**](https://azure.microsoft.com/free/). -* A [**Translator**](https://portal.azure.com/#create/Microsoft) service resource (**not** a Cognitive Services multi-service resource. *See* [Create a new Azure resource](../../cognitive-services-apis-create-account.md#create-a-new-azure-cognitive-services-resource). -* An [**Azure Blob Storage account**](https://portal.azure.com/#create/Microsoft.StorageAccount-ARM). You will create containers to store and organize your blob data within your storage account. +* Use the **SAS URL** as your sourceURL and targetURL values. -### Create your tokens +* Append the **SAS query string** to your existing sourceURL and targetURL values. 
-Go to the [Azure portal](https://portal.azure.com/#home) and navigate as follows: +Here is a sample REST API request: - **Your storage account** → **containers** → **your container** → **your blob** +```json +{ + "inputs": [ + { + "storageType": "File", + "source": { + "sourceUrl": "https://my.blob.core.windows.net/source-en/source-english.docx?sv=2019-12-12&st=2021-01-26T18%3A30%3A20Z&se=2021-02-05T18%3A30%3A00Z&sr=c&sp=rl&sig=d7PZKyQsIeE6xb%2B1M4Yb56I%2FEEKoNIF65D%2Fs0IFsYcE%3D" + }, + "targets": [ + { + "targetUrl": "https://my.blob.core.windows.net/target/try/Target-Spanish.docx?sv=2019-12-12&st=2021-01-26T18%3A31%3A11Z&se=2021-02-05T18%3A31%3A00Z&sr=c&sp=wl&sig=AgddSzXLXwHKpGHr7wALt2DGQJHCzNFF%2F3L94JHAWZM%3D", + "language": "es" + }, + { + "targetUrl": "https://my.blob.core.windows.net/target/try/Target-German.docx?sv=2019-12-12&st=2021-01-26T18%3A31%3A11Z&se=2021-02-05T18%3A31%3A00Z&sr=c&sp=wl&sig=AgddSzXLXwHKpGHr7wALt2DGQJHCzNFF%2F3L94JHAWZM%3D", + "language": "de" + } + ] + } + ] +} +``` -1. Select **Generate SAS** from the menu near the top of the page. - -1. Select **Signing method** → **User delegation key**. - -1. Define **Permissions** by checking and/or clearing the appropriate check box. - -1. Specify the signed key **Start** and **Expiry** times. - -1. The **Allowed IP addresses** field is optional and specifies an IP address or a range of IP addresses from which to accept requests. If the request IP address doesn't match the IP address or address range specified on the SAS token, it won't be authorized. - -1. The **Allowed protocols** field is optional and specifies the protocol permitted for a request made with the SAS. The default value is HTTPS. - -1. Review then select **Generate SAS token and URL**. - -1. The **Blob SAS token** query string and **Blob SAS URL** will be displayed in the lower area of window. - -1. **Copy and paste the Blob SAS token and URL values in a secure location. 
They'll only be displayed once and cannot be retrieved once the window is closed.** - -1. To construct a SAS URL, append the SAS token (URI) to the URL for a storage service. - -## Learn more - -* [Create SAS tokens for blobs or containers programmatically](../../../storage/blobs/sas-service-create.md) -* [Permissions for a directory, container, or blob](/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob) +That's it! You've learned how to create SAS tokens to authorize how clients access your data. ## Next steps > [!div class="nextstepaction"] > [Get Started with Document Translation](get-started-with-document-translation.md) > -> diff --git a/articles/cognitive-services/Translator/how-to-create-translator-resource.md b/articles/cognitive-services/Translator/how-to-create-translator-resource.md index 9d98c96040db..ba436dc15a20 100644 --- a/articles/cognitive-services/Translator/how-to-create-translator-resource.md +++ b/articles/cognitive-services/Translator/how-to-create-translator-resource.md @@ -20,31 +20,16 @@ In this article, you'll learn how to create a Translator resource in the Azure p To get started, you'll need an active [**Azure account**](https://azure.microsoft.com/free/cognitive-services/). If you don't have one, you can [**create a free 12-month subscription**](https://azure.microsoft.com/free/). -## Translator resource types +## Create your resource The Translator service can be accessed through two different resource types: -* **Single-service** resource types enable access to a single service API key and endpoint. - -* **Multi-service** resource types enable access to multiple Cognitive Services using a single API key and endpoint. 
The Cognitive Services resource is currently available for the following services: - * Language ([Translator](../translator/translator-overview.md), [Language Understanding (LUIS)](../luis/what-is-luis.md), [Language service](../text-analytics/overview.md)) - * Vision ([Computer Vision](../computer-vision/overview.md)), ([Face](../face/overview.md)) - * Decision ([Content Moderator](../content-moderator/overview.md)) - -## Create your resource - -* Navigate directly to the [**Create Translator**](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextTranslation) page in the Azure portal to complete your project details. +* [**Single-service**](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextTranslation) resource types enable access to a single service API key and endpoint. -* Navigate directly to the [**Create Cognitive Services**](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) page in the Azure portal to complete your project details. +* [**Multi-service**](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne) resource types enable access to multiple Cognitive Services using a single API key and endpoint. The Cognitive Services resource is currently available for the following services: ->[!TIP] ->If you prefer, you can start on the Azure Portal home page to begin the **Create** process as follows: -> -> 1. Navigate to the [**Azure Portal**](https://portal.azure.com/#home) home page. -> 1. Select ➕**Create a resource** from the Azure services menu. ->1. In the **Search the Marketplace** search box, enter and select **Translator** (single-service resource) or **Cognitive Services** (multi-service resource). *See* [Choose your resource type](#create-your-resource), above. -> 1. Select **Create** and you will be taken to the project details page. ->

    +> [!TIP] +> Create a Cognitive Services resource if you plan to access multiple cognitive services under a single endpoint/key. For Translator Service access only, create a Translator single-service resource. Please note that you'll need a single-service resource if you intend to use [Azure Active Directory authentication](../../active-directory/authentication/overview-authentication.md). ## Complete your project and instance details diff --git a/articles/cognitive-services/language-service/concepts/model-lifecycle.md b/articles/cognitive-services/language-service/concepts/model-lifecycle.md index 6e7725b18baa..1ac568f08ec0 100644 --- a/articles/cognitive-services/language-service/concepts/model-lifecycle.md +++ b/articles/cognitive-services/language-service/concepts/model-lifecycle.md @@ -101,10 +101,10 @@ Use the table below to find which API versions are supported by each feature: | Feature | Supported versions | Latest Generally Available version | Latest preview version | |-----------------------------------------------------|---------------------------------------------------------------------|------------------------------------|------------------------| -| Custom text classification | `2022-03-01-preview` | | `2022-03-01-preview` | -| Conversational language understanding | `2022-03-01-preview` | | `2022-03-01-preview` | -| Custom named entity recognition | `2022-03-01-preview` | | `2022-03-01-preview` | -| Orchestration workflow | `2022-03-01-preview` | | `2022-03-01-preview` | +| Custom text classification | `2022-05-01` ,`2022-05-15-preview` | `2022-05-01` | `2022-05-15-preview` | +| Conversational language understanding | `2022-05-01` ,`2022-05-15-preview` | `2022-05-01` | `2022-05-15-preview` | +| Custom named entity recognition | `2022-05-01` ,`2022-05-15-preview` | `2022-05-01` | `2022-05-15-preview` | +| Orchestration workflow | `2022-05-01`,`2022-05-15-preview` | `2022-05-01` | `2022-05-15-preview` | ## Next steps diff --git 
a/articles/cognitive-services/language-service/concepts/use-asynchronously.md b/articles/cognitive-services/language-service/concepts/use-asynchronously.md index 5fb6bcdb15f7..f5d87a5b76e0 100644 --- a/articles/cognitive-services/language-service/concepts/use-asynchronously.md +++ b/articles/cognitive-services/language-service/concepts/use-asynchronously.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: conceptual -ms.date: 12/03/2021 +ms.date: 05/27/2022 ms.author: aahi --- @@ -18,11 +18,12 @@ The Language service enables you to send API requests asynchronously, using eith Currently, the following features are available to be used asynchronously: * Entity linking -* Extractive summarization +* Document summarization +* Conversation summarization * Key phrase extraction * Language detection * Named Entity Recognition (NER) -* Personally Identifiable Information (PII) detection +* Customer content detection * Sentiment analysis and opinion mining * Text Analytics for health diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/call-api.md b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/call-api.md index aaf42234384b..150a53c1c76c 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/call-api.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/call-api.md @@ -68,8 +68,8 @@ You can also use the client libraries provided by the Azure SDK to send requests |Language |Package version | |---------|---------| - |.NET | [5.2.0-beta.2](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.2) | - |Python | [5.2.0b2](https://pypi.org/project/azure-ai-textanalytics/5.2.0b2/) | + |.NET | [1.0.0-beta.3 ](https://www.nuget.org/packages/Azure.AI.Language.Conversations/1.0.0-beta.3) | + |Python | 
[1.1.0b1](https://pypi.org/project/azure-ai-language-conversations/) | 4. After you've installed the client library, use the following samples on GitHub to start calling the API. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/call-api.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/call-api.md index 9ede1b5c043f..2f2494e7baf4 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/call-api.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/how-to/call-api.md @@ -57,10 +57,10 @@ First you will need to get your resource key and endpoint: |Language |Package version | |---------|---------| - |.NET | [5.2.0-beta.2](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.2) | - |Java | [5.2.0-beta.2](https://mvnrepository.com/artifact/com.azure/azure-ai-textanalytics/5.2.0-beta.2) | - |JavaScript | [5.2.0-beta.2](https://www.npmjs.com/package/@azure/ai-text-analytics/v/5.2.0-beta.2) | - |Python | [5.2.0b2](https://pypi.org/project/azure-ai-textanalytics/5.2.0b2/) | + |.NET | [5.2.0-beta.3](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.3) | + |Java | [5.2.0-beta.3](https://mvnrepository.com/artifact/com.azure/azure-ai-textanalytics/5.2.0-beta.3) | + |JavaScript | [6.0.0-beta.1](https://www.npmjs.com/package/@azure/ai-text-analytics/v/6.0.0-beta.1) | + |Python | [5.2.0b4](https://pypi.org/project/azure-ai-textanalytics/5.2.0b4/) | 4. After you've installed the client library, use the following samples on GitHub to start calling the API. 
diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/service-limits.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/service-limits.md index 3a54be878e76..9ea6ab30f2a5 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/service-limits.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/service-limits.md @@ -47,9 +47,7 @@ Custom named entity recognition is only available in some Azure regions. To use * West Europe * North Europe * UK south -* Southeast Asia * Australia East -* Sweden Central ## API limits diff --git a/articles/cognitive-services/language-service/custom-text-classification/how-to/call-api.md b/articles/cognitive-services/language-service/custom-text-classification/how-to/call-api.md index 0846b9eae13c..a65a9b891326 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/how-to/call-api.md +++ b/articles/cognitive-services/language-service/custom-text-classification/how-to/call-api.md @@ -61,10 +61,10 @@ First you will need to get your resource key and endpoint: |Language |Package version | |---------|---------| - |.NET | [5.2.0-beta.2](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.2) | - |Java | [5.2.0-beta.2](https://mvnrepository.com/artifact/com.azure/azure-ai-textanalytics/5.2.0-beta.2) | - |JavaScript | [5.2.0-beta.2](https://www.npmjs.com/package/@azure/ai-text-analytics/v/5.2.0-beta.2) | - |Python | [5.2.0b2](https://pypi.org/project/azure-ai-textanalytics/5.2.0b2/) | + |.NET | [5.2.0-beta.3](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.3) | + |Java | [5.2.0-beta.3](https://mvnrepository.com/artifact/com.azure/azure-ai-textanalytics/5.2.0-beta.3) | + |JavaScript | [6.0.0-beta.1](https://www.npmjs.com/package/@azure/ai-text-analytics/v/6.0.0-beta.1) | + |Python | [5.2.0b4](https://pypi.org/project/azure-ai-textanalytics/5.2.0b4/) | 4. 
After you've installed the client library, use the following samples on GitHub to start calling the API. diff --git a/articles/cognitive-services/language-service/custom-text-classification/service-limits.md b/articles/cognitive-services/language-service/custom-text-classification/service-limits.md index 3447bcc96bb4..b65a2bf538f8 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/service-limits.md +++ b/articles/cognitive-services/language-service/custom-text-classification/service-limits.md @@ -51,9 +51,8 @@ Custom text classification is only available in some Azure regions. To use custo * West Europe * North Europe * UK south -* Southeast Asia * Australia East -* Sweden Central + ## API limits diff --git a/articles/cognitive-services/language-service/key-phrase-extraction/tutorials/integrate-power-bi.md b/articles/cognitive-services/language-service/key-phrase-extraction/tutorials/integrate-power-bi.md index 93e9f810c2b1..4fd5e788dcd1 100644 --- a/articles/cognitive-services/language-service/key-phrase-extraction/tutorials/integrate-power-bi.md +++ b/articles/cognitive-services/language-service/key-phrase-extraction/tutorials/integrate-power-bi.md @@ -8,9 +8,9 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial -ms.date: 11/02/2021 +ms.date: 05/27/2022 ms.author: aahi -ms.custom: language-service-key-phrase, ignite-fall-2021 +ms.custom: language-service-key-phrase, ignite-fall-2021, cogserv-non-critical-language --- # Tutorial: Extract key phrases from text stored in Power BI diff --git a/articles/cognitive-services/language-service/named-entity-recognition/tutorials/extract-excel-information.md b/articles/cognitive-services/language-service/named-entity-recognition/tutorials/extract-excel-information.md index 762994dd3425..616a07c0919d 100644 --- a/articles/cognitive-services/language-service/named-entity-recognition/tutorials/extract-excel-information.md +++ 
b/articles/cognitive-services/language-service/named-entity-recognition/tutorials/extract-excel-information.md @@ -10,7 +10,7 @@ ms.subservice: language-service ms.topic: tutorial ms.date: 11/02/2021 ms.author: aahi -ms.custom: language-service-ner, ignite-fall-2021 +ms.custom: language-service-ner, ignite-fall-2021, cogserv-non-critical-language --- # Extract information in Excel using Named Entity Recognition(NER) and Power Automate diff --git a/articles/cognitive-services/language-service/orchestration-workflow/how-to/call-api.md b/articles/cognitive-services/language-service/orchestration-workflow/how-to/call-api.md index 8d1a5bc6866e..92412319ad3a 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/how-to/call-api.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/how-to/call-api.md @@ -69,8 +69,8 @@ You can also use the client libraries provided by the Azure SDK to send requests |Language |Package version | |---------|---------| - |.NET | [5.2.0-beta.2](https://www.nuget.org/packages/Azure.AI.TextAnalytics/5.2.0-beta.2) | - |Python | [5.2.0b2](https://pypi.org/project/azure-ai-textanalytics/5.2.0b2/) | + |.NET | [1.0.0-beta.3 ](https://www.nuget.org/packages/Azure.AI.Language.Conversations/1.0.0-beta.3) | + |Python | [1.1.0b1](https://pypi.org/project/azure-ai-language-conversations/) | 4. After you've installed the client library, use the following samples on GitHub to start calling the API. 
diff --git a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png new file mode 100644 index 000000000000..299f43a02a90 Binary files /dev/null and b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png differ diff --git a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png new file mode 100644 index 000000000000..d945cf17cf1b Binary files /dev/null and b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png differ diff --git a/articles/cognitive-services/language-service/question-answering/tutorials/bot-service.md b/articles/cognitive-services/language-service/question-answering/tutorials/bot-service.md index 21d5431f9f56..0cf1a658374c 100644 --- a/articles/cognitive-services/language-service/question-answering/tutorials/bot-service.md +++ b/articles/cognitive-services/language-service/question-answering/tutorials/bot-service.md @@ -5,7 +5,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial ms.date: 11/02/2021 -ms.custom: language-service-question-answering, ignite-fall-2021 +ms.custom: language-service-question-answering, ignite-fall-2021, cogserv-non-critical-language --- # Tutorial: Create a FAQ bot diff --git a/articles/cognitive-services/language-service/question-answering/tutorials/multiple-domains.md b/articles/cognitive-services/language-service/question-answering/tutorials/multiple-domains.md index edb4fe4a1c3e..53e01e412c4c 100644 --- 
a/articles/cognitive-services/language-service/question-answering/tutorials/multiple-domains.md +++ b/articles/cognitive-services/language-service/question-answering/tutorials/multiple-domains.md @@ -7,7 +7,7 @@ ms.topic: tutorial author: jboback ms.author: jboback ms.date: 11/02/2021 -ms.custom: language-service-question-answering, ignite-fall-2021 +ms.custom: language-service-question-answering, ignite-fall-2021, cogserv-non-critical-language --- # Add multiple categories to your FAQ bot diff --git a/articles/cognitive-services/language-service/summarization/includes/quickstarts/rest-api.md b/articles/cognitive-services/language-service/summarization/includes/quickstarts/rest-api.md index 1b4cc98c0030..d7a50e9f4ca5 100644 --- a/articles/cognitive-services/language-service/summarization/includes/quickstarts/rest-api.md +++ b/articles/cognitive-services/language-service/summarization/includes/quickstarts/rest-api.md @@ -177,7 +177,7 @@ The following cURL commands are executed from a BASH shell. 
Edit these commands [!INCLUDE [REST API quickstart instructions](../../../includes/rest-api-instructions.md)] ```bash -curl -i -X POST https://your-language-endpoint-here/language/analyze-conversations?api-version=2022-05-15-preview \ +curl -i -X POST https://your-language-endpoint-here/language/analyze-conversations/jobs?api-version=2022-05-15-preview \ -H "Content-Type: application/json" \ -H "Ocp-Apim-Subscription-Key: your-key-here" \ -d \ @@ -298,11 +298,11 @@ curl -X GET https://your-language-endpoint-here/language/analyze-conversation "summaries": [ { "aspect": "issue", - "text": "Customer tried to set up wifi connection for Smart Brew 300 medication machine, but it didn't work" + "text": "Customer wanted to set up wifi connection for Smart Brew 300 coffee machine, but it didn't work" }, { "aspect": "resolution", - "text": "Asked customer to try the following steps | Asked customer for the power light | Helped customer to connect to the machine" + "text": "Asked customer if the power light is slowly blinking | Checked the Contoso coffee app. It had no prompt" } ], "warnings": [] diff --git a/articles/cognitive-services/language-service/summarization/overview.md b/articles/cognitive-services/language-service/summarization/overview.md index 4c36d32a7b9c..8c8ed9a67fb1 100644 --- a/articles/cognitive-services/language-service/summarization/overview.md +++ b/articles/cognitive-services/language-service/summarization/overview.md @@ -48,7 +48,7 @@ Document summarization supports the following features: This documentation contains the following article types: * [**Quickstarts**](quickstart.md?pivots=rest-api&tabs=conversation-summarization) are getting-started instructions to guide you through making requests to the service. -* [**How-to guides**](how-to/document-summarization.md) contain instructions for using the service in more specific or customized ways. 
+* [**How-to guides**](how-to/conversation-summarization.md) contain instructions for using the service in more specific or customized ways. Conversation summarization is a broad topic, consisting of several approaches to represent relevant information in text. The conversation summarization feature described in this documentation enables you to use abstractive text summarization to produce a summary of issues and resolutions in transcripts of web chats and service call transcripts between customer-service agents, and your customers. @@ -86,8 +86,8 @@ Conversation summarization feature would simplify the text into the following: |Example summary | Format | Conversation aspect | |---------|----|----| -| Customer wants to use the wifi connection on their Smart Brew 300. They can’t connect it using the Contoso Coffee app. | One or two sentences | issue | -| Checked if the power light is blinking slowly. Tried to do a factory reset. | One or more sentences, generated from multiple lines of the transcript. | resolution | +| Customer wants to use the wifi connection on their Smart Brew 300. But it didn't work. | One or two sentences | issue | +| Checked if the power light is blinking slowly. Checked the Contoso coffee app. It had no prompt. Tried to do a factory reset. | One or more sentences, generated from multiple lines of the transcript. 
| resolution | --- diff --git a/articles/cognitive-services/language-service/toc.yml b/articles/cognitive-services/language-service/toc.yml index ac199f7cddb8..0797e5c22c18 100644 --- a/articles/cognitive-services/language-service/toc.yml +++ b/articles/cognitive-services/language-service/toc.yml @@ -942,6 +942,8 @@ items: href: /javascript/api/overview/azure/ai-text-analytics-readme?view=azure-node-preview&preserve-view=true - name: Concepts items: + - name: Data limits + href: concepts/data-limits.md - name: Multilingual and emoji support href: concepts/multilingual-emoji-support.md - name: Migrate from LUIS, QnA Maker, and Text Analytics diff --git a/articles/cognitive-services/language-service/tutorials/use-kubernetes-service.md b/articles/cognitive-services/language-service/tutorials/use-kubernetes-service.md index 22bb67bcedcc..6018348e99f0 100644 --- a/articles/cognitive-services/language-service/tutorials/use-kubernetes-service.md +++ b/articles/cognitive-services/language-service/tutorials/use-kubernetes-service.md @@ -8,9 +8,9 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial -ms.date: 11/02/2021 +ms.date: 05/27/2022 ms.author: aahi -ms.custom: ignite-fall-2021 +ms.custom: ignite-fall-2021, cogserv-non-critical-language --- # Deploy a key phrase extraction container to Azure Kubernetes Service diff --git a/articles/cognitive-services/openai/breadcrumb/toc.yml b/articles/cognitive-services/openai/breadcrumb/toc.yml index e108cc7b30fd..e24ffb8f093d 100644 --- a/articles/cognitive-services/openai/breadcrumb/toc.yml +++ b/articles/cognitive-services/openai/breadcrumb/toc.yml @@ -15,7 +15,7 @@ items: - name: Cognitive Services # Original doc set name tocHref: /legal/cognitive-services/openai # Destination doc set route - topicHref: /azure/cognitive-services/overview # Original doc set route + topicHref: /azure/cognitive-services/what-are-cognitive-services # Original doc set route items: - name: Azure OpenAI 
Service # Destination doc set name tocHref: /legal/cognitive-services/openai # Destination doc set route diff --git a/articles/cognitive-services/video-indexer/toc.yml b/articles/cognitive-services/video-indexer/toc.yml index e035f0936939..4070debe0b2f 100644 --- a/articles/cognitive-services/video-indexer/toc.yml +++ b/articles/cognitive-services/video-indexer/toc.yml @@ -2,36 +2,36 @@ - name: Overview items: - name: What is Video Indexer? - href: /azure-video-indexer/video-indexer-overview.md + href: /azure/azure-video-indexer/video-indexer-overview - name: Concepts - href: /azure-video-indexer/video-indexer-overview.md + href: /azure/azure-video-indexer/video-indexer-overview - name: Quickstarts expanded: true items: - name: Get started - href: /azure-video-indexer/video-indexer-get-started.md + href: /azure/azure-video-indexer/video-indexer-get-started - name: How to guides items: - name: Migrate from v1 to v2 - href: /azure-video-indexer/video-indexer-overview.md + href: /azure/azure-video-indexer/video-indexer-overview - name: Connect to Azure - href: /azure-video-indexer/connect-to-azure.md + href: /azure/azure-video-indexer/connect-to-azure - name: Manage account connected to Azure - href: /azure-video-indexer/manage-account-connected-to-azure.md + href: /azure/azure-video-indexer/manage-account-connected-to-azure - name: Use Video Indexer API - href: /azure-video-indexer/video-indexer-use-apis.md + href: /azure/azure-video-indexer/video-indexer-use-apis - name: Use API to upload and index videos - href: /azure-video-indexer/upload-index-videos.md + href: /azure/azure-video-indexer/upload-index-videos - name: Examine Video Indexer output - href: /azure-video-indexer/video-indexer-output-json-v2.md + href: /azure/azure-video-indexer/video-indexer-output-json-v2 - name: Find exact moments within videos - href: /azure-video-indexer/video-indexer-search.md + href: /azure/azure-video-indexer/video-indexer-search - name: View and edit Video Indexer insights - 
href: /app-service/quickstart-dotnetcore.md?tabs=netframework48 + href: /azure/app-service/quickstart-dotnetcore?tabs=netframework48 - name: Create video insights from existing videos - href: /azure-video-indexer/use-editor-create-project.md + href: /azure/azure-video-indexer/use-editor-create-project - name: Embed widgets into your application - href: /azure-video-indexer/video-indexer-embed-widgets.md + href: /azure/azure-video-indexer/video-indexer-embed-widgets - name: Reference items: - name: Video Indexer API @@ -43,4 +43,4 @@ - name: Stack Overflow href: https://stackoverflow.com/questions/tagged/microsoft-cognitive - name: Azure Roadmap - href: https://azure.microsoft.com/roadmap/?category=ai-machine-learning \ No newline at end of file + href: https://azure.microsoft.com/roadmap/?category=ai-machine-learning diff --git a/articles/communication-services/includes/phone-number-special-order.md b/articles/communication-services/includes/phone-number-special-order.md index a4f1021534c0..941a6ad0c359 100644 --- a/articles/communication-services/includes/phone-number-special-order.md +++ b/articles/communication-services/includes/phone-number-special-order.md @@ -8,5 +8,5 @@ ms.custom: references_regions --- > [!IMPORTANT] -> For high-volume orders or in the event that your desired phone number is unavailable, complete **[this form](https://github.com/Azure/Communication/blob/master/Forms/ACS%20-%20Bulk%20Number%20Acquisition.docx)** and email it to acstnrequest@microsoft.com with a subject line beginning with "Azure Communication Services Number Request:". +> For high-volume orders or in the event that your desired phone number is unavailable, complete **[this form](https://github.com/Azure/Communication/blob/master/Forms/ACS%20-%20Manual%20Number%20Acquisition%20Form%20US-UK-CA-DK.docx)** and email it to acstnrequest@microsoft.com with a subject line beginning with "Azure Communication Services Number Request:". 
diff --git a/articles/communication-services/quickstarts/email/includes/send-email-net.md b/articles/communication-services/quickstarts/email/includes/send-email-net.md index 7044a034de00..fcf784f67eea 100644 --- a/articles/communication-services/quickstarts/email/includes/send-email-net.md +++ b/articles/communication-services/quickstarts/email/includes/send-email-net.md @@ -174,4 +174,4 @@ dotnet run ## Sample code -You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/send-email) +You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendEmail) diff --git a/articles/communication-services/tutorials/media/sample-builder/azure-complete-deployment.png b/articles/communication-services/tutorials/media/virtual-visits/azure-complete-deployment.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/azure-complete-deployment.png rename to articles/communication-services/tutorials/media/virtual-visits/azure-complete-deployment.png diff --git a/articles/communication-services/tutorials/media/sample-builder/azure-resource-final.png b/articles/communication-services/tutorials/media/virtual-visits/azure-resource-final.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/azure-resource-final.png rename to articles/communication-services/tutorials/media/virtual-visits/azure-resource-final.png diff --git a/articles/communication-services/tutorials/media/sample-builder/azure-resources.png b/articles/communication-services/tutorials/media/virtual-visits/azure-resources.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/azure-resources.png rename to articles/communication-services/tutorials/media/virtual-visits/azure-resources.png diff --git 
a/articles/communication-services/tutorials/media/virtual-visits/bookings-acs-app-integration-url.png b/articles/communication-services/tutorials/media/virtual-visits/bookings-acs-app-integration-url.png new file mode 100644 index 000000000000..897fdef5d64f Binary files /dev/null and b/articles/communication-services/tutorials/media/virtual-visits/bookings-acs-app-integration-url.png differ diff --git a/articles/communication-services/tutorials/media/virtual-visits/bookings-services-online-meeting.png b/articles/communication-services/tutorials/media/virtual-visits/bookings-services-online-meeting.png new file mode 100644 index 000000000000..d122c7e42f2c Binary files /dev/null and b/articles/communication-services/tutorials/media/virtual-visits/bookings-services-online-meeting.png differ diff --git a/articles/communication-services/tutorials/media/virtual-visits/bookings-services.png b/articles/communication-services/tutorials/media/virtual-visits/bookings-services.png new file mode 100644 index 000000000000..8131a1982a0b Binary files /dev/null and b/articles/communication-services/tutorials/media/virtual-visits/bookings-services.png differ diff --git a/articles/communication-services/tutorials/media/sample-builder/bookings-url.png b/articles/communication-services/tutorials/media/virtual-visits/bookings-url.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/bookings-url.png rename to articles/communication-services/tutorials/media/virtual-visits/bookings-url.png diff --git a/articles/communication-services/tutorials/media/sample-builder/sample-builder-arm.png b/articles/communication-services/tutorials/media/virtual-visits/sample-builder-arm.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/sample-builder-arm.png rename to articles/communication-services/tutorials/media/virtual-visits/sample-builder-arm.png diff --git 
a/articles/communication-services/tutorials/media/sample-builder/sample-builder-landing.png b/articles/communication-services/tutorials/media/virtual-visits/sample-builder-landing.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/sample-builder-landing.png rename to articles/communication-services/tutorials/media/virtual-visits/sample-builder-landing.png diff --git a/articles/communication-services/tutorials/media/sample-builder/sample-builder-start.png b/articles/communication-services/tutorials/media/virtual-visits/sample-builder-start.png similarity index 100% rename from articles/communication-services/tutorials/media/sample-builder/sample-builder-start.png rename to articles/communication-services/tutorials/media/virtual-visits/sample-builder-start.png diff --git a/articles/communication-services/tutorials/media/sample-builder/virtual-visit-arch.svg b/articles/communication-services/tutorials/media/virtual-visits/virtual-visit-arch.svg similarity index 98% rename from articles/communication-services/tutorials/media/sample-builder/virtual-visit-arch.svg rename to articles/communication-services/tutorials/media/virtual-visits/virtual-visit-arch.svg index 61b62b637273..1b40351c4af7 100644 --- a/articles/communication-services/tutorials/media/sample-builder/virtual-visit-arch.svg +++ b/articles/communication-services/tutorials/media/virtual-visits/virtual-visit-arch.svg @@ -1,598 +1,598 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Export - - - - - Sheet.1076 - - - - Sheet.13000 - - - - User.1974 - User - - Sheet.13002 - - Sheet.13003 - - - - - Sheet.13004 - - Sheet.13005 - - - - - - - User - - - Web App (Was Websites).1979 - Client Application - - Sheet.13007 - - Sheet.13008 - - - - - Sheet.13009 - - Sheet.13010 - - Sheet.13011 - - Sheet.13012 - - - - - Sheet.13013 - - - - Sheet.13014 - - Sheet.13015 - - - - - Sheet.13016 - - Sheet.13017 - - - - - Sheet.13018 - - Sheet.13019 - - - - - 
Sheet.13020 - - Sheet.13021 - - - - - Sheet.13022 - - Sheet.13023 - - - - - Sheet.13024 - - Sheet.13025 - - - - - Sheet.13026 - - - - Sheet.13027 - - - - - Sheet.13028 - - Sheet.13029 - - - - - Sheet.13030 - - Sheet.13031 - - - - - Sheet.13032 - - Sheet.13033 - - - - - - - - Client Application - - - Dynamic connector.2007 - - - - Sheet.1 - - - - - - Mobile App (Was Mobile Services).2008 - - Sheet.13036 - - - - Sheet.13037 - - - - - Sheet.13038 - 4 - - - - 4 - - Sheet.13039 - Azure Communication Services - - - - Azure Communication Services - - User.2014 - User - - Sheet.13041 - - Sheet.13042 - - - - - Sheet.13043 - - Sheet.13044 - - - - - - - User - - - Dynamic connector.2032 - - - - Sheet.13046 - 5 - - - - 5 - - Dynamic connector.2034 - - - - Sheet.13048 - 1-2 - - - - 1-2 - - Teams.2036 - Teams - - - - - Sheet.13050 - - Sheet.13051 - - - - - - - Sheet.13052 - - - - - - - Sheet.13053 - - - - - - - Sheet.13054 - - - - - - - Sheet.13055 - - - - - - - Sheet.13056 - - - - - - - Sheet.13057 - - - - - - - Sheet.13058 - - - - - - - - - - Teams - - - Teams.2046 - Microsoft 365 Calendar (Graph) - - - - - Sheet.13060 - - Sheet.13061 - - - - - - - Sheet.13062 - - - - - - - Sheet.13063 - - - - - - - Sheet.13064 - - - - - - - Sheet.13065 - - - - - - - Sheet.13066 - - - - - - - Sheet.13067 - - - - - - - Sheet.13068 - - - - - - - - - - Microsoft 365 Calendar (Graph) - - - Dynamic connector.2056 - - - - Sheet.13070 - 3 - - - - 3 - - Dynamic connector.2058 - - - - Sheet.13072 - Voice, Video & Text Communication - - - - Voice, Video & Text Communication - - Sheet.13073 - Schedule Visit & Receive Reminder - - - - Schedule Visit & Receive Reminder - - Sheet.13074 - Start Meeting - - - - Start Meeting - - Sheet.13075 - Join Meeting - - - - Join Meeting - - Dynamic connector.2070 - - - - Sheet.13076 - - Sheet.13077 - B - - - - B - - Sheet.13078 - Microsoft 365 Booking - - - - Microsoft 365Booking - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Export + + + + + 
Sheet.1076 + + + + Sheet.13000 + + + + User.1974 + User + + Sheet.13002 + + Sheet.13003 + + + + + Sheet.13004 + + Sheet.13005 + + + + + + + User + + + Web App (Was Websites).1979 + Client Application + + Sheet.13007 + + Sheet.13008 + + + + + Sheet.13009 + + Sheet.13010 + + Sheet.13011 + + Sheet.13012 + + + + + Sheet.13013 + + + + Sheet.13014 + + Sheet.13015 + + + + + Sheet.13016 + + Sheet.13017 + + + + + Sheet.13018 + + Sheet.13019 + + + + + Sheet.13020 + + Sheet.13021 + + + + + Sheet.13022 + + Sheet.13023 + + + + + Sheet.13024 + + Sheet.13025 + + + + + Sheet.13026 + + + + Sheet.13027 + + + + + Sheet.13028 + + Sheet.13029 + + + + + Sheet.13030 + + Sheet.13031 + + + + + Sheet.13032 + + Sheet.13033 + + + + + + + + Client Application + + + Dynamic connector.2007 + + + + Sheet.1 + + + + + + Mobile App (Was Mobile Services).2008 + + Sheet.13036 + + + + Sheet.13037 + + + + + Sheet.13038 + 4 + + + + 4 + + Sheet.13039 + Azure Communication Services + + + + Azure Communication Services + + User.2014 + User + + Sheet.13041 + + Sheet.13042 + + + + + Sheet.13043 + + Sheet.13044 + + + + + + + User + + + Dynamic connector.2032 + + + + Sheet.13046 + 5 + + + + 5 + + Dynamic connector.2034 + + + + Sheet.13048 + 1-2 + + + + 1-2 + + Teams.2036 + Teams + + + + + Sheet.13050 + + Sheet.13051 + + + + + + + Sheet.13052 + + + + + + + Sheet.13053 + + + + + + + Sheet.13054 + + + + + + + Sheet.13055 + + + + + + + Sheet.13056 + + + + + + + Sheet.13057 + + + + + + + Sheet.13058 + + + + + + + + + + Teams + + + Teams.2046 + Microsoft 365 Calendar (Graph) + + + + + Sheet.13060 + + Sheet.13061 + + + + + + + Sheet.13062 + + + + + + + Sheet.13063 + + + + + + + Sheet.13064 + + + + + + + Sheet.13065 + + + + + + + Sheet.13066 + + + + + + + Sheet.13067 + + + + + + + Sheet.13068 + + + + + + + + + + Microsoft 365 Calendar (Graph) + + + Dynamic connector.2056 + + + + Sheet.13070 + 3 + + + + 3 + + Dynamic connector.2058 + + + + Sheet.13072 + Voice, Video & Text Communication + + + + Voice, Video & Text 
Communication + + Sheet.13073 + Schedule Visit & Receive Reminder + + + + Schedule Visit & Receive Reminder + + Sheet.13074 + Start Meeting + + + + Start Meeting + + Sheet.13075 + Join Meeting + + + + Join Meeting + + Dynamic connector.2070 + + + + Sheet.13076 + + Sheet.13077 + B + + + + B + + Sheet.13078 + Microsoft 365 Booking + + + + Microsoft 365Booking + + + diff --git a/articles/communication-services/tutorials/media/sample-builder/virtual-visit-options.svg b/articles/communication-services/tutorials/media/virtual-visits/virtual-visit-options.svg similarity index 98% rename from articles/communication-services/tutorials/media/sample-builder/virtual-visit-options.svg rename to articles/communication-services/tutorials/media/virtual-visits/virtual-visit-options.svg index a1f1fba75186..52ea4667f052 100644 --- a/articles/communication-services/tutorials/media/sample-builder/virtual-visit-options.svg +++ b/articles/communication-services/tutorials/media/virtual-visits/virtual-visit-options.svg @@ -1,999 +1,999 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - Export - - - - - Sheet.1076 - - - - Sheet.1077 - - - - Sheet.1078 - - - - Sheet.1079 - - - - User.1974 - External User - - Sheet.1081 - - Sheet.1082 - - - - - Sheet.1083 - - Sheet.1084 - - - - - - - External User - - - Web App (Was Websites).1979 - Custom Azure App - - Sheet.1086 - - Sheet.1087 - - - - - Sheet.1088 - - Sheet.1089 - - Sheet.1090 - - Sheet.1091 - - - - - Sheet.1092 - - - - Sheet.1093 - - Sheet.1094 - - - - - Sheet.1095 - - Sheet.1096 - - - - - Sheet.1097 - - Sheet.1098 - - - - - Sheet.1099 - - Sheet.1100 - - - - - Sheet.1101 - - Sheet.1102 - - - - - Sheet.1103 - - Sheet.1104 - - - - - Sheet.1105 - - - - Sheet.1106 - - - - - Sheet.1107 - - Sheet.1108 - - - - - Sheet.1109 - - Sheet.1110 - - - - - Sheet.1111 - - Sheet.1112 - - - - - - - - Custom Azure App - - - Mobile App (Was Mobile Services).2008 - - Sheet.1114 - - - - Sheet.1115 - - - - - User.2014 - Hosting Employee - - Sheet.1117 - - 
Sheet.1118 - - - - - Sheet.1119 - - Sheet.1120 - - - - - - - HostingEmployee - - - Dynamic connector.2032 - - - - Teams.2036 - Teams - - - - - Sheet.1123 - - Sheet.1124 - - - - - - - Sheet.1125 - - - - - - - Sheet.1126 - - - - - - - Sheet.1127 - - - - - - - Sheet.1128 - - - - - - - Sheet.1129 - - - - - - - Sheet.1130 - - - - - - - Sheet.1131 - - - - - - - - - - Teams - - - User.2073 - External User - - Sheet.1133 - - Sheet.1134 - - - - - Sheet.1135 - - Sheet.1136 - - - - - - - External User - - - User.2109 - Hosting Employee - - Sheet.1138 - - Sheet.1139 - - - - - Sheet.1140 - - Sheet.1141 - - - - - - - HostingEmployee - - - Dynamic connector.2114 - - - - Teams.2115 - Teams - - - - - Sheet.1144 - - Sheet.1145 - - - - - - - Sheet.1146 - - - - - - - Sheet.1147 - - - - - - - Sheet.1148 - - - - - - - Sheet.1149 - - - - - - - Sheet.1150 - - - - - - - Sheet.1151 - - - - - - - Sheet.1152 - - - - - - - - - - Teams - - - Teams.2125 - Teams - - - - - Sheet.1154 - - Sheet.1155 - - - - - - - Sheet.1156 - - - - - - - Sheet.1157 - - - - - - - Sheet.1158 - - - - - - - Sheet.1159 - - - - - - - Sheet.1160 - - - - - - - Sheet.1161 - - - - - - - Sheet.1162 - - - - - - - - - - Teams - - - User.2135 - External User - - Sheet.1164 - - Sheet.1165 - - - - - Sheet.1166 - - Sheet.1167 - - - - - - - External User - - - Web App (Was Websites).2140 - Custom Azure App - - Sheet.1169 - - Sheet.1170 - - - - - Sheet.1171 - - Sheet.1172 - - Sheet.1173 - - Sheet.1174 - - - - - Sheet.1175 - - - - Sheet.1176 - - Sheet.1177 - - - - - Sheet.1178 - - Sheet.1179 - - - - - Sheet.1180 - - Sheet.1181 - - - - - Sheet.1182 - - Sheet.1183 - - - - - Sheet.1184 - - Sheet.1185 - - - - - Sheet.1186 - - Sheet.1187 - - - - - Sheet.1188 - - - - Sheet.1189 - - - - - Sheet.1190 - - Sheet.1191 - - - - - Sheet.1192 - - Sheet.1193 - - - - - Sheet.1194 - - Sheet.1195 - - - - - - - - Custom Azure App - - - Mobile App (Was Mobile Services).2168 - - Sheet.1197 - - - - Sheet.1198 - - - - - User.2171 - Hosting Employee - - 
Sheet.1200 - - Sheet.1201 - - - - - Sheet.1202 - - Sheet.1203 - - - - - - - HostingEmployee - - - Web App (Was Websites).2218 - Custom Azure App - - Sheet.1205 - - Sheet.1206 - - - - - Sheet.1207 - - Sheet.1208 - - Sheet.1209 - - Sheet.1210 - - - - - Sheet.1211 - - - - Sheet.1212 - - Sheet.1213 - - - - - Sheet.1214 - - Sheet.1215 - - - - - Sheet.1216 - - Sheet.1217 - - - - - Sheet.1218 - - Sheet.1219 - - - - - Sheet.1220 - - Sheet.1221 - - - - - Sheet.1222 - - Sheet.1223 - - - - - Sheet.1224 - - - - Sheet.1225 - - - - - Sheet.1226 - - Sheet.1227 - - - - - Sheet.1228 - - Sheet.1229 - - - - - Sheet.1230 - - Sheet.1231 - - - - - - - - Custom Azure App - - - Mobile App (Was Mobile Services).2246 - - Sheet.1233 - - - - Sheet.1234 - - - - - Dynamic connector.2249 - - - - Sheet.1236 - Microsoft 365 - - - - Microsoft 365 - - Sheet.1237 - Microsoft 365 & Azure Hybrid - - - - Microsoft 365 & Azure Hybrid - - Sheet.1238 - Azure Custom - - - - Azure Custom - - + + + + + + + + + + + + + + + + + + + + + + + + + + + Export + + + + + Sheet.1076 + + + + Sheet.1077 + + + + Sheet.1078 + + + + Sheet.1079 + + + + User.1974 + External User + + Sheet.1081 + + Sheet.1082 + + + + + Sheet.1083 + + Sheet.1084 + + + + + + + External User + + + Web App (Was Websites).1979 + Custom Azure App + + Sheet.1086 + + Sheet.1087 + + + + + Sheet.1088 + + Sheet.1089 + + Sheet.1090 + + Sheet.1091 + + + + + Sheet.1092 + + + + Sheet.1093 + + Sheet.1094 + + + + + Sheet.1095 + + Sheet.1096 + + + + + Sheet.1097 + + Sheet.1098 + + + + + Sheet.1099 + + Sheet.1100 + + + + + Sheet.1101 + + Sheet.1102 + + + + + Sheet.1103 + + Sheet.1104 + + + + + Sheet.1105 + + + + Sheet.1106 + + + + + Sheet.1107 + + Sheet.1108 + + + + + Sheet.1109 + + Sheet.1110 + + + + + Sheet.1111 + + Sheet.1112 + + + + + + + + Custom Azure App + + + Mobile App (Was Mobile Services).2008 + + Sheet.1114 + + + + Sheet.1115 + + + + + User.2014 + Hosting Employee + + Sheet.1117 + + Sheet.1118 + + + + + Sheet.1119 + + Sheet.1120 + + + + + + + 
HostingEmployee + + + Dynamic connector.2032 + + + + Teams.2036 + Teams + + + + + Sheet.1123 + + Sheet.1124 + + + + + + + Sheet.1125 + + + + + + + Sheet.1126 + + + + + + + Sheet.1127 + + + + + + + Sheet.1128 + + + + + + + Sheet.1129 + + + + + + + Sheet.1130 + + + + + + + Sheet.1131 + + + + + + + + + + Teams + + + User.2073 + External User + + Sheet.1133 + + Sheet.1134 + + + + + Sheet.1135 + + Sheet.1136 + + + + + + + External User + + + User.2109 + Hosting Employee + + Sheet.1138 + + Sheet.1139 + + + + + Sheet.1140 + + Sheet.1141 + + + + + + + HostingEmployee + + + Dynamic connector.2114 + + + + Teams.2115 + Teams + + + + + Sheet.1144 + + Sheet.1145 + + + + + + + Sheet.1146 + + + + + + + Sheet.1147 + + + + + + + Sheet.1148 + + + + + + + Sheet.1149 + + + + + + + Sheet.1150 + + + + + + + Sheet.1151 + + + + + + + Sheet.1152 + + + + + + + + + + Teams + + + Teams.2125 + Teams + + + + + Sheet.1154 + + Sheet.1155 + + + + + + + Sheet.1156 + + + + + + + Sheet.1157 + + + + + + + Sheet.1158 + + + + + + + Sheet.1159 + + + + + + + Sheet.1160 + + + + + + + Sheet.1161 + + + + + + + Sheet.1162 + + + + + + + + + + Teams + + + User.2135 + External User + + Sheet.1164 + + Sheet.1165 + + + + + Sheet.1166 + + Sheet.1167 + + + + + + + External User + + + Web App (Was Websites).2140 + Custom Azure App + + Sheet.1169 + + Sheet.1170 + + + + + Sheet.1171 + + Sheet.1172 + + Sheet.1173 + + Sheet.1174 + + + + + Sheet.1175 + + + + Sheet.1176 + + Sheet.1177 + + + + + Sheet.1178 + + Sheet.1179 + + + + + Sheet.1180 + + Sheet.1181 + + + + + Sheet.1182 + + Sheet.1183 + + + + + Sheet.1184 + + Sheet.1185 + + + + + Sheet.1186 + + Sheet.1187 + + + + + Sheet.1188 + + + + Sheet.1189 + + + + + Sheet.1190 + + Sheet.1191 + + + + + Sheet.1192 + + Sheet.1193 + + + + + Sheet.1194 + + Sheet.1195 + + + + + + + + Custom Azure App + + + Mobile App (Was Mobile Services).2168 + + Sheet.1197 + + + + Sheet.1198 + + + + + User.2171 + Hosting Employee + + Sheet.1200 + + Sheet.1201 + + + + + Sheet.1202 + + Sheet.1203 + + 
+ + + + + HostingEmployee + + + Web App (Was Websites).2218 + Custom Azure App + + Sheet.1205 + + Sheet.1206 + + + + + Sheet.1207 + + Sheet.1208 + + Sheet.1209 + + Sheet.1210 + + + + + Sheet.1211 + + + + Sheet.1212 + + Sheet.1213 + + + + + Sheet.1214 + + Sheet.1215 + + + + + Sheet.1216 + + Sheet.1217 + + + + + Sheet.1218 + + Sheet.1219 + + + + + Sheet.1220 + + Sheet.1221 + + + + + Sheet.1222 + + Sheet.1223 + + + + + Sheet.1224 + + + + Sheet.1225 + + + + + Sheet.1226 + + Sheet.1227 + + + + + Sheet.1228 + + Sheet.1229 + + + + + Sheet.1230 + + Sheet.1231 + + + + + + + + Custom Azure App + + + Mobile App (Was Mobile Services).2246 + + Sheet.1233 + + + + Sheet.1234 + + + + + Dynamic connector.2249 + + + + Sheet.1236 + Microsoft 365 + + + + Microsoft 365 + + Sheet.1237 + Microsoft 365 & Azure Hybrid + + + + Microsoft 365 & Azure Hybrid + + Sheet.1238 + Azure Custom + + + + Azure Custom + + diff --git a/articles/communication-services/tutorials/virtual-visits.md b/articles/communication-services/tutorials/virtual-visits.md index 3d1de17d3c41..27dab4634f56 100644 --- a/articles/communication-services/tutorials/virtual-visits.md +++ b/articles/communication-services/tutorials/virtual-visits.md @@ -6,7 +6,7 @@ manager: chpalm services: azure-communication-services ms.author: chpalm -ms.date: 01/10/2022 +ms.date: 05/24/2022 ms.topic: tutorial ms.service: azure-communication-services ms.custom: event-tier1-build-2022 @@ -29,7 +29,7 @@ Azure and Teams are interoperable. This interoperability gives organizations cho - **Microsoft 365 + Azure hybrid.** Combine Microsoft 365 Teams and Bookings with a custom Azure application for the consumer experience. Organizations take advantage of Microsoft 365's employee familiarity but customize and embed the consumer visit experience in their own application. - **Azure custom.** Build the entire solution on Azure primitives: the business experience, the consumer experience, and scheduling systems. 
-![Diagram of virtual visit implementation options](./media/sample-builder/virtual-visit-options.svg) +![Diagram of virtual visit implementation options](./media/virtual-visits/virtual-visit-options.svg) These three **implementation options** are columns in the table below, while each row provides a **use case** and the **enabling technologies**. @@ -49,7 +49,7 @@ There are other ways to customize and combine Microsoft tools to deliver a virtu ## Extend Microsoft 365 with Azure The rest of this tutorial focuses on Microsoft 365 and Azure hybrid solutions. These hybrid configurations are popular because they combine employee familiarity of Microsoft 365 with the ability to customize the consumer experience. They’re also a good launching point to understanding more complex and customized architectures. The diagram below shows user steps for a virtual visit: -![High-level architecture of a hybrid virtual visits solution](./media/sample-builder/virtual-visit-arch.svg) +![High-level architecture of a hybrid virtual visits solution](./media/virtual-visits/virtual-visit-arch.svg) 1. Consumer schedules the visit using Microsoft 365 Bookings. 2. Consumer gets a visit reminder through SMS and Email. 3. Provider joins the visit using Microsoft Teams. @@ -63,40 +63,55 @@ In this section we’re going to use a Sample Builder tool to deploy a Microsoft This sample uses takes advantage of the Microsoft 365 Bookings app to power the consumer scheduling experience and create meetings for providers. Thus the first step is creating a Bookings calendar and getting the Booking page URL from https://outlook.office.com/bookings/calendar. -![Booking configuration experience](./media/sample-builder/bookings-url.png) +![Screenshot of Booking configuration experience](./media/virtual-visits/bookings-url.png) + +Make sure online meeting is enable for the calendar by going to https://outlook.office.com/bookings/services. 
+ +![Screenshot of Booking services configuration experience](./media/virtual-visits/bookings-services.png) + +And then make sure "Add online meeting" is enable. + +![Screenshot of Booking services online meeting configuration experience](./media/virtual-visits/bookings-services-online-meeting.png) + ### Step 2 – Sample Builder Use the Sample Builder to customize the consumer experience. You can reach the Sampler Builder using this [link](https://aka.ms/acs-sample-builder), or navigating to the page within the Azure Communication Services resource in the Azure portal. Step through the Sample Builder wizard and configure if Chat or Screen Sharing should be enabled. Change themes and text to you match your application. You can preview your configuration live from the page in both Desktop and Mobile browser form-factors. -[ ![Sample builder start page](./media/sample-builder/sample-builder-start.png)](./media/sample-builder/sample-builder-start.png#lightbox) +[ ![Screenshot of Sample builder start page](./media/virtual-visits/sample-builder-start.png)](./media/virtual-visits/sample-builder-start.png#lightbox) ### Step 3 - Deploy At the end of the Sample Builder wizard, you can **Deploy to Azure** or download the code as a zip. The sample builder code is publicly available on [GitHub](https://github.com/Azure-Samples/communication-services-virtual-visits-js). -[ ![Sample builder deployment page](./media/sample-builder/sample-builder-landing.png)](./media/sample-builder/sample-builder-landing.png#lightbox) +[ ![Screenshot of Sample builder deployment page](./media/virtual-visits/sample-builder-landing.png)](./media/virtual-visits/sample-builder-landing.png#lightbox) The deployment launches an Azure Resource Manager (ARM) template that deploys the themed application you configured. 
-![Sample builder arm template](./media/sample-builder/sample-builder-arm.png) +![Screenshot of Sample builder arm template](./media/virtual-visits/sample-builder-arm.png) After walking through the ARM template you can **Go to resource group** -![Screenshot of a completed Azure Resource Manager Template](./media/sample-builder/azure-complete-deployment.png) +![Screenshot of a completed Azure Resource Manager Template](./media/virtual-visits/azure-complete-deployment.png) ### Step 4 - Test The Sample Builder creates three resources in the selected Azure subscriptions. The **App Service** is the consumer front end, powered by Azure Communication Services. -![produced azure resources in azure portal](./media/sample-builder/azure-resources.png) +![Screenshot of produced azure resources in azure portal](./media/virtual-visits/azure-resources.png) + +Opening the App Service’s URL and navigating to `https:///VISIT` allows you to try out the consumer experience and join a Teams meeting. `https:///BOOK` embeds the Booking experience for consumer scheduling. + +![Screenshot of final view of azure app service](./media/virtual-visits/azure-resource-final.png) + +### Step 5 - Set deployed app URL in Bookings -Opening the App Service’s URL and navigating to `https:///VISITS` allows you to try out the consumer experience and join a Teams meeting. `https:///BOOK` embeds the Booking experience for consumer scheduling. +Copy your application url into your calendar Business information setting by going to https://outlook.office.com/bookings/businessinformation. -![final view of azure app service](./media/sample-builder/azure-resource-final.png) +![Screenshot of final view of bookings business information](./media/virtual-visits/bookings-acs-app-integration-url.png) ## Going to production The Sample Builder gives you the basics of a Microsoft 365 and Azure virtual visit: consumer scheduling via Bookings, consumer joins via custom app, and the provider joins via Teams. 
However, there are several things to consider as you take this scenario to production. ### Launching patterns -Consumers want to jump directly to the virtual visit from the scheduling reminders they receive from Bookings. In Bookings, you can provide a URL prefix that will be used in reminders. If your prefix is `https:///VISITS`, Bookings will point users to `https:///VISITS?=.` +Consumers want to jump directly to the virtual visit from the scheduling reminders they receive from Bookings. In Bookings, you can provide a URL prefix that will be used in reminders. If your prefix is `https:///VISIT`, Bookings will point users to `https:///VISIT?MEETINGURL=.` ### Integrate into your existing app The app service generated by the Sample Builder is a stand-alone artifact, designed for desktop and mobile browsers. However you may have a website or mobile application already and need to migrate these experiences to that existing codebase. The code generated by the Sample Builder should help, but you can also use: diff --git a/articles/confidential-ledger/create-client-certificate.md b/articles/confidential-ledger/create-client-certificate.md index 3e3d2089e2b8..9e0d2bd52f87 100755 --- a/articles/confidential-ledger/create-client-certificate.md +++ b/articles/confidential-ledger/create-client-certificate.md @@ -17,7 +17,7 @@ You will need a certificate in PEM format. You can create more than one certific ## OpenSSL -We recommending using OpenSSL to generate certificates. If you have git installed, you can run OpenSSL in the git shell. Otherwise, you can install OpenSSL for your OS. +We recommend using OpenSSL to generate certificates. If you have git installed, you can run OpenSSL in the git shell. Otherwise, you can install OpenSSL for your OS. - **Windows**: Install [chocolatey for Windows](https://chocolatey.org/install), open a PowerShell terminal windows in admin mode, and run `choco install openssl`. 
Alternatively, you can install OpenSSL for Windows directly from [here](http://gnuwin32.sourceforge.net/packages/openssl.htm). - **Linux**: Run `sudo apt-get install openssl` @@ -31,4 +31,4 @@ openssl req -new -key "privkey_name.pem" -x509 -nodes -days 365 -out "cert.pem" ## Next steps -- [Overview of Microsoft Azure confidential ledger](overview.md) \ No newline at end of file +- [Overview of Microsoft Azure confidential ledger](overview.md) diff --git a/articles/connectors/connectors-create-api-azureblobstorage.md b/articles/connectors/connectors-create-api-azureblobstorage.md index 63e8aa195eb9..e4f50bf008a8 100644 --- a/articles/connectors/connectors-create-api-azureblobstorage.md +++ b/articles/connectors/connectors-create-api-azureblobstorage.md @@ -5,7 +5,7 @@ services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 04/18/2022 +ms.date: 05/28/2022 tags: connectors --- @@ -15,11 +15,6 @@ From your workflow in Azure Logic Apps, you can access and manage files stored a You can connect to Blob Storage from both **Logic App (Consumption)** and **Logic App (Standard)** resource types. You can use the connector with logic app workflows in multi-tenant Azure Logic Apps, single-tenant Azure Logic Apps, and the integration service environment (ISE). With **Logic App (Standard)**, you can use either the *built-in* **Azure Blob** operations or the **Azure Blob Storage** managed connector operations. -> [!IMPORTANT] -> A logic app workflow can't directly access a storage account behind a firewall if they're both in the same region. -> As a workaround, your logic app and storage account can be in different regions. For more information about enabling -> access from Azure Logic Apps to storage accounts behind firewalls, review the [Access storage accounts behind firewalls](#access-storage-accounts-behind-firewalls) section later in this topic. - ## Prerequisites - An Azure account and subscription. 
If you don't have an Azure subscription, [sign up for a free Azure account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). @@ -42,7 +37,7 @@ You can connect to Blob Storage from both **Logic App (Consumption)** and **Logi ## Connector reference -For more technical details about this connector, such as triggers, actions, and limits, review the [connector's reference page](/connectors/azureblobconnector/). If you don't want to use the Blob operations, you can use the [use HTTP trigger or action along with a a managed identity for blob operations instead](#access-blob-storage-with-managed-identities). +For more technical details about this connector, such as triggers, actions, and limits, review the [connector's reference page](/connectors/azureblobconnector/). @@ -116,7 +111,7 @@ To add a Blob trigger to a logic app workflow in single-tenant Azure Logic Apps, | Task | Path syntax | |------|-------------| - | Check the root folder for a newly added blob. | **<*container-name*>** | + | Check the root folder and its nested subfolders for a newly added blob. | **<*container-name*>** | | Check the root folder for changes to a specific blob. | **<*container-name*>/<*blob-name*>.<*blob-extension*>** | | Check the root folder for changes to any blobs with the same extension, for example, **.txt**. | **<*container-name*>/{name}.txt**

    **Important**: Make sure that you use **{name}** as a literal. | | Check the root folder for changes to any blobs with names starting with a specific string, for example, **Sample-**. | **<*container-name*>/Sample-{name}**

    **Important**: Make sure that you use **{name}** as a literal. | @@ -156,7 +151,7 @@ To add a Blob action to a logic app workflow in multi-tenant Azure Logic Apps, f This example starts with the [**Recurrence** trigger](connectors-native-recurrence.md). -1. Under the trigger or action where you want to add the Blob action, select **New step** or **Add an action**, if between steps. +1. Under the trigger or action where you want to add the Blob action, select **New step** or **Add an action**, if between steps. This example uses the built-in Azure Blob action. 1. Under the designer search box, make sure that **All** is selected. In the search box, enter **Azure blob**. Select the Blob action that you want to use. @@ -298,7 +293,7 @@ You can add network security to an Azure storage account by [restricting access - To access storage accounts behind firewalls using the Azure Blob Storage managed connector in Consumption, Standard, and ISE-based logic apps, review the following documentation: - - [Access storage accounts with managed identities](#access-blob-storage-with-managed-identities) + - [Access storage accounts in same region with managed identities](#access-blob-storage-in-same-region-with-managed-identities) - [Access storage accounts in other regions](#access-storage-accounts-in-other-regions) @@ -350,7 +345,7 @@ To add your outbound IP addresses to the storage account firewall, follow these You don't have to create a private endpoint. You can just permit traffic through the ISE outbound IPs on the storage account. -### Access Blob Storage with managed identities +### Access Blob Storage in same region with managed identities To connect to Azure Blob Storage in any region, you can use [managed identities for authentication](../active-directory/managed-identities-azure-resources/overview.md). You can create an exception that gives Microsoft trusted services, such as a managed identity, access to your storage account through a firewall. 
diff --git a/articles/connectors/connectors-create-api-sqlazure.md b/articles/connectors/connectors-create-api-sqlazure.md index c093bcf261b2..3a39800d1bb6 100644 --- a/articles/connectors/connectors-create-api-sqlazure.md +++ b/articles/connectors/connectors-create-api-sqlazure.md @@ -1,30 +1,43 @@ --- title: Connect to SQL databases -description: Automate workflows for SQL databases on premises or in the cloud with Azure Logic Apps. +description: Connect to SQL databases from workflows in Azure Logic Apps. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 04/18/2022 +ms.date: 06/01/2022 tags: connectors --- -# Connect to a SQL database from Azure Logic Apps +# Connect to a SQL database from workflows in Azure Logic Apps -This article shows how to access your SQL database with the SQL Server connector in Azure Logic Apps. You can then create automated workflows that are triggered by events in your SQL database or other systems and manage your SQL data and resources. +This article shows how to access your SQL database from a workflow in Azure Logic Apps with the SQL Server connector. You can then create automated workflows that run when triggered by events in your SQL database or in other systems and run actions to manage your SQL data and resources. -For example, you can use actions that get, insert, and delete data along with running SQL queries and stored procedures. You can create workflow that checks for new records in a non-SQL database, does some processing work, creates new records in your SQL database using the results, and sends email alerts about the new records in your SQL database. +For example, your workflow can run actions that get, insert, and delete data or that can run SQL queries and stored procedures. Your workflow can check for new records in a non-SQL database, do some processing work, use the results to create new records in your SQL database, and send email alerts about the new records. 
- The SQL Server connector supports the following SQL editions: +If you're new to Azure Logic Apps, review the following get started documentation: + +* [What is Azure Logic Apps](../logic-apps/logic-apps-overview.md) +* [Quickstart: Create your first logic app workflow](../logic-apps/quickstart-create-first-logic-app-workflow.md) + +## Supported SQL editions + +The SQL Server connector supports the following SQL editions: * [SQL Server](/sql/sql-server/sql-server-technical-documentation) * [Azure SQL Database](/azure/azure-sql/database/sql-database-paas-overview) * [Azure SQL Managed Instance](/azure/azure-sql/managed-instance/sql-managed-instance-paas-overview) -If you're new to Azure Logic Apps, review the following documentation: +## Connector technical reference + +The SQL Server connector has different versions, based on [logic app type and host environment](../logic-apps/logic-apps-overview.md#resource-environment-differences). -* [What is Azure Logic Apps](../logic-apps/logic-apps-overview.md) -* [Quickstart: Create your first logic app workflow](../logic-apps/quickstart-create-first-logic-app-workflow.md) +| Logic app | Environment | Connector version | +|-----------|-------------|-------------------| +| **Consumption** | Multi-tenant Azure Logic Apps | [Managed connector - Standard class](managed.md). For more information, review the [SQL Server managed connector reference](/connectors/sql). | +| **Consumption** | Integration service environment (ISE) | [Managed connector - Standard class](managed.md) and ISE version. For more information, review the [SQL Server managed connector reference](/connectors/sql). | +| **Standard** | Single-tenant Azure Logic Apps and App Service Environment v3 (Windows plans only) | [Managed connector - Standard class](managed.md) and [built-in connector](built-in.md), which is [service provider based](../logic-apps/custom-connector-overview.md#service-provider-interface-implementation).

    The built-in version differs in the following ways:

    - The built-in version has no triggers.

    - The built-in version has a single **Execute Query** action. The action can directly connect to Azure virtual networks without the on-premises data gateway.

    For the managed version, review the [SQL Server managed connector reference](/connectors/sql/). | +|||| ## Prerequisites @@ -32,7 +45,7 @@ If you're new to Azure Logic Apps, review the following documentation: * [SQL Server database](/sql/relational-databases/databases/create-a-database), [Azure SQL Database](/azure/azure-sql/database/single-database-create-quickstart), or [SQL Managed Instance](/azure/azure-sql/managed-instance/instance-create-quickstart). - The SQL connector requires that your tables contain data so that SQL connector operations can return results when called. For example, if you use Azure SQL Database, you can use the included sample databases to try the SQL connector operations. + The SQL Server connector requires that your tables contain data so that the connector operations can return results when called. For example, if you use Azure SQL Database, you can use the included sample databases to try the SQL Server connector operations. * The information required to create a SQL database connection, such as your SQL server and database names. If you're using Windows Authentication or SQL Server Authentication to authenticate access, you also need your user name and password. You can usually find this information in the connection string. @@ -57,7 +70,7 @@ If you're new to Azure Logic Apps, review the following documentation: -* To connect to an on-premises SQL server, the following extra requirements apply based on whether you have a Consumption logic app workflow, either in multi-tenant Azure Logic Apps or an [integration service environment (ISE)](../logic-apps/connect-virtual-network-vnet-isolated-environment-overview.md), or if you have a Standard logic app workflow in [single-tenant Azure Logic Apps](../logic-apps/single-tenant-overview-compare.md). +* To connect to an on-premises SQL server, the following extra requirements apply, based on whether you have a Consumption or Standard logic app workflow. 
* Consumption logic app workflow @@ -67,23 +80,7 @@ If you're new to Azure Logic Apps, review the following documentation: * Standard logic app workflow - In single-tenant Azure Logic Apps, you can use the built-in SQL Server connector, which requires a connection string. If you want to use the managed SQL Server connector, you need follow the same requirements as a Consumption logic app workflow in multi-tenant Azure Logic Apps. - -## Connector technical reference - -This connector is available for logic app workflows in multi-tenant Azure Logic Apps, ISEs, and single-tenant Azure Logic Apps. - -* For Consumption logic app workflows in multi-tenant Azure Logic Apps, this connector is available only as a managed connector. For more information, review the [managed SQL Server connector operations](/connectors/sql). - -* For Consumption logic app workflows in an ISE, this connector is available as a managed connector and as an ISE connector that's designed to run in an ISE. For more information, review the [managed SQL Server connector operations](/connectors/sql). - -* For Standard logic app workflows in single-tenant Azure Logic Apps, this connector is available as a managed connector and as a built-in connector that's designed to run in the same process as the single-tenant Azure Logic Apps runtime. However, the built-in version differs in the following ways: - - * The built-in SQL Server connector has no triggers. - - * The built-in SQL Server connector has only one operation: **Execute Query** - -For the managed SQL Server connector technical information, such as trigger and action operations, limits, and known issues, review the [SQL Server connector's reference page](/connectors/sql/), which is generated from the Swagger description. + You can use the SQL Server built-in connector, which requires a connection string. 
If you want to use the SQL Server managed connector, you need to follow the same requirements as a Consumption logic app workflow in multi-tenant Azure Logic Apps. @@ -99,17 +96,17 @@ The following steps use the Azure portal, but with the appropriate Azure Logic A 1. In the Azure portal, open your blank logic app workflow in the designer. -1. Find and select the [managed SQL Server connector trigger](/connectors/sql) that you want to use. +1. Find and select the [SQL Server managed connector trigger](/connectors/sql) that you want to use. - 1. Under the designer search box, select **All**. + 1. On the designer, under the search box, select **All**. - 1. In the designer search box, enter **sql server**. + 1. In the search box, enter **sql server**. 1. From the triggers list, select the SQL trigger that you want. This example continues with the trigger named **When an item is created**. - ![Screenshot showing the Azure portal, workflow designer for Consumption logic app, search box with "sql server", and the "When an item is created" trigger selected.](./media/connectors-create-api-sqlazure/select-sql-server-trigger-consumption.png) + ![Screenshot showing the Azure portal, Consumption logic app workflow designer, search box with "sql server", and "When an item is created" trigger selected.](./media/connectors-create-api-sqlazure/select-sql-server-trigger-consumption.png) -1. If you're connecting to your SQL database for the first time, you're prompted to [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. +1. If the designer prompts you for connection information, [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. 1. In the trigger, specify the interval and frequency for how often the trigger checks the table. 
@@ -119,37 +116,39 @@ The following steps use the Azure portal, but with the appropriate Azure Logic A For example, to view the data in this row, you can add other actions that create a file that includes the fields from the returned row, and then send email alerts. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). -1. On the designer toolbar, select **Save**. +1. When you're done, save your workflow. Although this step automatically enables and publishes your logic app live in Azure, the only action that your logic app currently takes is to check your database based on your specified interval and frequency. ### [Standard](#tab/standard) -In Standard logic app workflows, only the managed SQL Server connector has triggers. The built-in SQL Server connector doesn't have any triggers. +In Standard logic app workflows, only the SQL Server managed connector has triggers. The SQL Server built-in connector doesn't have any triggers. 1. In the Azure portal, open your blank logic app workflow in the designer. -1. Find and select the [managed SQL Server connector trigger](/connectors/sql) that you want to use. +1. Find and select the [SQL Server managed connector trigger](/connectors/sql) that you want to use. - 1. Under the designer search box, select **Azure**. + 1. On the designer, select **Choose an operation**. - 1. In the designer search box, enter **sql server**. + 1. Under the **Choose an operation** search box, select **Azure**. + + 1. In the search box, enter **sql server**. 1. From the triggers list, select the SQL trigger that you want. This example continues with the trigger named **When an item is created**. 
- ![Screenshot showing the Azure portal, workflow designer for Standard logic app, search box with "sql server", and the "When an item is created" trigger selected.](./media/connectors-create-api-sqlazure/select-sql-server-trigger-standard.png) + ![Screenshot showing Azure portal, Standard logic app workflow designer, search box with "sql server", and "When an item is created" trigger selected.](./media/connectors-create-api-sqlazure/select-sql-server-trigger-standard.png) -1. If you're connecting to your SQL database for the first time, you're prompted to [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. +1. If the designer prompts you for connection information, [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. 1. In the trigger, specify the interval and frequency for how often the trigger checks the table. 1. To add other properties available for this trigger, open the **Add new parameter** list and select those properties. - This trigger returns only one row from the selected table, and nothing else. To perform other tasks, continue by adding either a [SQL connector action](#add-sql-action) or [another action](../connectors/apis-list.md) that performs the next task that you want in your logic app workflow. + This trigger returns only one row from the selected table, and nothing else. To perform other tasks, continue by adding either a [SQL Server connector action](#add-sql-action) or [another action](../connectors/apis-list.md) that performs the next task that you want in your logic app workflow. For example, to view the data in this row, you can add other actions that create a file that includes the fields from the returned row, and then send email alerts. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). -1. 
On the designer toolbar, select **Save**. +1. When you're done, save your workflow. Although this step automatically enables and publishes your logic app live in Azure, the only action that your logic app currently takes is to check your database based on your specified interval and frequency. @@ -159,7 +158,7 @@ In Standard logic app workflows, only the managed SQL Server connector has trigg ## Trigger recurrence shift and drift (daylight saving time) -Recurring connection-based triggers where you need to create a connection first, such as the managed SQL Server trigger, differ from built-in triggers that run natively in Azure Logic Apps, such as the [Recurrence trigger](../connectors/connectors-native-recurrence.md). For recurring connection-based triggers, the recurrence schedule isn't the only driver that controls execution, and the time zone only determines the initial start time. Subsequent runs depend on the recurrence schedule, the last trigger execution, *and* other factors that might cause run times to drift or produce unexpected behavior. For example, unexpected behavior can include failure to maintain the specified schedule when daylight saving time (DST) starts and ends. +Recurring connection-based triggers where you need to create a connection first, such as the SQL Server managed connector trigger, differ from built-in triggers that run natively in Azure Logic Apps, such as the [Recurrence trigger](../connectors/connectors-native-recurrence.md). For recurring connection-based triggers, the recurrence schedule isn't the only driver that controls execution, and the time zone only determines the initial start time. Subsequent runs depend on the recurrence schedule, the last trigger execution, *and* other factors that might cause run times to drift or produce unexpected behavior. For example, unexpected behavior can include failure to maintain the specified schedule when daylight saving time (DST) starts and ends. 
To make sure that the recurrence time doesn't shift when DST takes effect, manually adjust the recurrence. That way, your workflow continues to run at the expected or specified start time. Otherwise, the start time shifts one hour forward when DST starts and one hour backward when DST ends. For more information, see [Recurrence for connection-based triggers](../connectors/apis-list.md#recurrence-for-connection-based-triggers). @@ -179,21 +178,21 @@ In this example, the logic app workflow starts with the [Recurrence trigger](../ 1. In the Azure portal, open your logic app workflow in the designer. -1. Find and select the [managed SQL Server connector action](/connectors/sql) that you want to use. This example continues with the action named **Get row**. +1. Find and select the [SQL Server managed connector action](/connectors/sql) that you want to use. This example continues with the action named **Get row**. 1. Under the trigger or action where you want to add the SQL action, select **New step**. Or, to add an action between existing steps, move your mouse over the connecting arrow. Select the plus sign (**+**) that appears, and then select **Add an action**. - 1. In the **Choose an operation** box, under the designer search box, select **All**. + 1. Under the **Choose an operation** search box, select **All**. - 1. In the designer search box, enter **sql server**. + 1. In the search box, enter **sql server**. 1. From the actions list, select the SQL Server action that you want. This example uses the **Get row** action, which gets a single record. ![Screenshot showing the Azure portal, workflow designer for Consumption logic app, the search box with "sql server", and "Get row" selected in the "Actions" list.](./media/connectors-create-api-sqlazure/select-sql-get-row-action-consumption.png) -1. If you're connecting to your SQL database for the first time, you're prompted to [create your SQL database connection now](#create-connection). 
After you create this connection, you can continue with the next step. +1. If the designer prompts you for connection information, [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. 1. If you haven't already provided the SQL server name and database name, provide those values. Otherwise, from the **Table name** list, select the table that you want to use. In the **Row id** property, enter the ID for the record that you want. @@ -201,9 +200,9 @@ In this example, the logic app workflow starts with the [Recurrence trigger](../ ![Screenshot showing Consumption workflow designer and the "Get row" action with the example "Table name" property value and empty row ID.](./media/connectors-create-api-sqlazure/specify-table-row-id-consumption.png) - This action returns only one row from the selected table, and nothing else. To view the data in this row, add other actions, for example, those that create a file that includes the fields from the returned row, and store that file in a cloud storage account. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). + This action returns only one row from the selected table, and nothing else. To view the data in this row, add other actions. For example, such actions might create a file, include the fields from the returned row, and store the file in a cloud storage account. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). -1. When you're done, on the designer toolbar, select **Save**. +1. When you're done, save your workflow. ### [Standard](#tab/standard) @@ -211,21 +210,21 @@ In this example, the logic app workflow starts with the [Recurrence trigger](../ 1. Find and select the SQL Server connector action that you want to use. - 1. Under the trigger or action where you want to add the SQL Server action, select **New step**. + 1. 
Under the trigger or action where you want to add the SQL Server action, select the plus sign (**+**), and then select **Add an action**. - Or, to add an action between existing steps, move your mouse over the connecting arrow. Select the plus sign (**+**) that appears, and then select **Add an action**. + Or, to add an action between existing steps, select the plus sign (**+**) on the connecting arrow, and then select **Add an action**. - 1. In the **Choose an operation** box, under the designer search box, select either of the following options: + 1. Under the **Choose an operation** search box, select either of the following options: - * **Built-in** when you want to use built-in SQL Server actions such as **Execute Query** + * **Built-in** when you want to use SQL Server built-in actions such as **Execute Query** ![Screenshot showing the Azure portal, workflow designer for Standard logic app, and designer search box with "Built-in" selected underneath.](./media/connectors-create-api-sqlazure/select-built-in-category-standard.png) - * **Azure** when you want to use [managed SQL Server connector actions](/connectors/sql) such as **Get row** + * **Azure** when you want to use [SQL Server managed connector actions](/connectors/sql) such as **Get row** ![Screenshot showing the Azure portal, workflow designer for Standard logic app, and designer search box with "Azure" selected underneath.](./media/connectors-create-api-sqlazure/select-azure-category-standard.png) - 1. In the designer search box, enter **sql server**. + 1. In the search box, enter **sql server**. 1. From the actions list, select the SQL Server action that you want. @@ -241,7 +240,7 @@ In this example, the logic app workflow starts with the [Recurrence trigger](../ ![Screenshot showing the designer search box with "sql server" and "Azure" selected underneath with the "Get row" action selected in the "Actions" list.](./media/connectors-create-api-sqlazure/select-sql-get-row-action-standard.png) -1. 
If you're connecting to your SQL database for the first time, you're prompted to [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. +1. If the designer prompts you for connection information, [create your SQL database connection now](#create-connection). After you create this connection, you can continue with the next step. 1. If you haven't already provided the SQL server name and database name, provide those values. Otherwise, from the **Table name** list, select the table that you want to use. In the **Row id** property, enter the ID for the record that you want. @@ -249,9 +248,9 @@ In this example, the logic app workflow starts with the [Recurrence trigger](../ ![Screenshot showing Standard workflow designer and "Get row" action with the example "Table name" property value and empty row ID.](./media/connectors-create-api-sqlazure/specify-table-row-id-standard.png) - This action returns only one row from the selected table, and nothing else. To view the data in this row, add other actions, for example, those that create a file that includes the fields from the returned row, and store that file in a cloud storage account. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). + This action returns only one row from the selected table, and nothing else. To view the data in this row, add other actions. For example, such actions might create a file, include the fields from the returned row, and store the file in a cloud storage account. To learn about other available actions for this connector, see the [connector's reference page](/connectors/sql/). -1. When you're done, on the designer toolbar, select **Save**. +1. When you're done, save your workflow. 
--- @@ -272,7 +271,7 @@ After you provide this information, continue with these steps: To access a SQL Managed Instance without using the on-premises data gateway or integration service environment, you have to [set up the public endpoint on the SQL Managed Instance](/azure/azure-sql/managed-instance/public-endpoint-configure). The public endpoint uses port 3342, so make sure that you specify this port number when you create the connection from your logic app. -The first time that you add either a [SQL Server trigger](#add-sql-trigger) or [SQL Server action](#add-sql-action), and you haven't previously created a connection to your database, you're prompted to complete these steps: +When you add a [SQL Server trigger](#add-sql-trigger) or [SQL Server action](#add-sql-action) without a previously created and active database connection, complete the following steps: 1. For **Connection name**, provide a name to use for your connection. @@ -280,10 +279,10 @@ The first time that you add either a [SQL Server trigger](#add-sql-trigger) or [ | Authentication | Description | |----------------|-------------| - | **Service principal (Azure AD application)** | - Available only for the managed SQL Server connector.

    - Requires an Azure AD application and service principal. For more information, see [Create an Azure AD application and service principal that can access resources using the Azure portal](../active-directory/develop/howto-create-service-principal-portal.md). | - | **Logic Apps Managed Identity** | - Available only for the managed SQL Server connector and ISE SQL Server connector.

    - Requires the following items:

    --- A valid managed identity that's [enabled on your logic app resource](../logic-apps/create-managed-service-identity.md) and has access to your database.

    --- **SQL DB Contributor** role access to the SQL Server resource

    --- **Contributor** access to the resource group that includes the SQL Server resource.

    For more information, see [SQL - Server-Level Roles](/sql/relational-databases/security/authentication-access/server-level-roles). | - | [**Azure AD Integrated**](/azure/azure-sql/database/authentication-aad-overview) | - Available only for the managed SQL Server connector and ISE SQL Server connector.

    - Requires a valid managed identity in Azure Active Directory (Azure AD) that's [enabled on your logic app resource](../logic-apps/create-managed-service-identity.md) and has access to your database. For more information, see these topics:

    - [Azure SQL Security Overview - Authentication](/azure/azure-sql/database/security-overview#authentication)
    - [Authorize database access to Azure SQL - Authentication and authorization](/azure/azure-sql/database/logins-create-manage#authentication-and-authorization)
    - [Azure SQL - Azure AD Integrated authentication](/azure/azure-sql/database/authentication-aad-overview) | - | [**SQL Server Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication) | - Available only for the managed SQL Server connector and ISE SQL Server connector.

    - Requires the following items:

    --- A data gateway resource that's previously created in Azure for your connection, regardless whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

    --- A valid user name and strong password that are created and stored in your SQL Server database. For more information, see the following topics:

    - [Azure SQL Security Overview - Authentication](/azure/azure-sql/database/security-overview#authentication)
    - [Authorize database access to Azure SQL - Authentication and authorization](/azure/azure-sql/database/logins-create-manage#authentication-and-authorization) | + | **Service principal (Azure AD application)** | - Available only for the SQL Server managed connector.

    - Requires an Azure AD application and service principal. For more information, see [Create an Azure AD application and service principal that can access resources using the Azure portal](../active-directory/develop/howto-create-service-principal-portal.md). | + | **Logic Apps Managed Identity** | - Available only for the SQL Server managed connector and ISE-versioned connector.

    - Requires the following items:

    --- A valid managed identity that's [enabled on your logic app resource](../logic-apps/create-managed-service-identity.md) and has access to your database.

    --- **SQL DB Contributor** role access to the SQL Server resource

    --- **Contributor** access to the resource group that includes the SQL Server resource.

    For more information, see [SQL - Server-Level Roles](/sql/relational-databases/security/authentication-access/server-level-roles). | + | [**Azure AD Integrated**](/azure/azure-sql/database/authentication-aad-overview) | - Available only for the SQL Server managed connector and ISE-versioned connector.

    - Requires a valid managed identity in Azure Active Directory (Azure AD) that's [enabled on your logic app resource](../logic-apps/create-managed-service-identity.md) and has access to your database. For more information, see these topics:

    - [Azure SQL Security Overview - Authentication](/azure/azure-sql/database/security-overview#authentication)
    - [Authorize database access to Azure SQL - Authentication and authorization](/azure/azure-sql/database/logins-create-manage#authentication-and-authorization)
    - [Azure SQL - Azure AD Integrated authentication](/azure/azure-sql/database/authentication-aad-overview) | + | [**SQL Server Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication) | - Available only for the SQL Server managed connector and ISE-versioned connector.

    - Requires the following items:

    --- A data gateway resource that's previously created in Azure for your connection, regardless of whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

    --- A valid user name and strong password that are created and stored in your SQL Server database. For more information, see the following topics:

    - [Azure SQL Security Overview - Authentication](/azure/azure-sql/database/security-overview#authentication)
    - [Authorize database access to Azure SQL - Authentication and authorization](/azure/azure-sql/database/logins-create-manage#authentication-and-authorization) | This connection and authentication information box looks similar to the following example, which selects **Azure AD Integrated**: @@ -332,7 +331,7 @@ The first time that you add either a [SQL Server trigger](#add-sql-trigger) or [ ### Connect to on-premises SQL Server -The first time that you add either a [SQL trigger](#add-sql-trigger) or [SQL action](#add-sql-action), and you haven't previously created a connection to your database, you're prompted to complete these steps: +When you add a [SQL Server trigger](#add-sql-trigger) or [SQL Server action](#add-sql-action) without a previously created and active database connection, complete the following steps: 1. For connections to your on-premises SQL server that require the on-premises data gateway, make sure that you've [completed these prerequisites](#multi-tenant-or-ise). @@ -342,8 +341,8 @@ The first time that you add either a [SQL trigger](#add-sql-trigger) or [SQL act | Authentication | Description | |----------------|-------------| - | [**SQL Server Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication) | - Available only for the managed SQL Server connector and ISE SQL Server connector.

    - Requires the following items:

    --- A data gateway resource that's previously created in Azure for your connection, regardless whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

    --- A valid user name and strong password that are created and stored in your SQL Server.

    For more information, see [SQL Server Authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication). | - | [**Windows Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-windows-authentication) | - Available only for the managed SQL Server connector.

    - Requires the following items:

    --- A data gateway resource that's previously created in Azure for your connection, regardless whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

    --- A valid Windows user name and password to confirm your identity through your Windows account.

    For more information, see [Windows Authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-windows-authentication). | + | [**SQL Server Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication) | - Available only for the SQL Server managed connector and ISE-versioned connector.

    - Requires the following items:

    --- A data gateway resource that's previously created in Azure for your connection, regardless of whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

    --- A valid user name and strong password that are created and stored in your SQL Server.

    For more information, see [SQL Server Authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication). | + | [**Windows Authentication**](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-windows-authentication) | - Available only for the SQL Server managed connector.

    - Requires the following items:

    --- A data gateway resource that's previously created in Azure for your connection, regardless of whether your logic app is in multi-tenant Azure Logic Apps or an ISE.

    --- A valid Windows user name and password to confirm your identity through your Windows account.

    For more information, see [Windows Authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-windows-authentication). | ||| 1. Select or provide the following values for your SQL database: @@ -384,18 +383,19 @@ The first time that you add either a [SQL trigger](#add-sql-trigger) or [SQL act ## Handle bulk data -Sometimes, you have to work with result sets so large that the connector doesn't return all the results at the same time, or you want better control over the size and structure for your result sets. Here's some ways that you can handle such large result sets: +Sometimes, you work with result sets so large that the connector doesn't return all the results at the same time. Or, you want better control over the size and structure for your result sets. The following list includes some ways that you can handle such large result sets: * To help you manage results as smaller sets, turn on *pagination*. For more information, see [Get bulk data, records, and items by using pagination](../logic-apps/logic-apps-exceed-default-page-size-with-pagination.md). For more information, see [SQL Pagination for bulk data transfer with Logic Apps](https://social.technet.microsoft.com/wiki/contents/articles/40060.sql-pagination-for-bulk-data-transfer-with-logic-apps.aspx). -* Create a [*stored procedure*](/sql/relational-databases/stored-procedures/stored-procedures-database-engine) that organizes the results the way that you want. The SQL connector provides many backend features that you can access by using Azure Logic Apps so that you can more easily automate business tasks that work with SQL database tables. +* Create a [*stored procedure*](/sql/relational-databases/stored-procedures/stored-procedures-database-engine) that organizes the results the way that you want. 
The SQL Server connector provides many backend features that you can access by using Azure Logic Apps so that you can more easily automate business tasks that work with SQL database tables. When a SQL action gets or inserts multiple rows, your logic app workflow can iterate through these rows by using an [*until loop*](../logic-apps/logic-apps-control-flow-loops.md#until-loop) within these [limits](../logic-apps/logic-apps-limits-and-config.md). However, when your logic app has to work with record sets so large, for example, thousands or millions of rows, that you want to minimize the costs resulting from calls to the database. To organize the results in the way that you want, you can create a stored procedure that runs in your SQL instance and uses the **SELECT - ORDER BY** statement. This solution gives you more control over the size and structure of your results. Your logic app calls the stored procedure by using the SQL Server connector's **Execute stored procedure** action. For more information, see [SELECT - ORDER BY Clause](/sql/t-sql/queries/select-order-by-clause-transact-sql). > [!NOTE] - > The SQL connector has a stored procedure timeout limit that's [less than 2-minutes](/connectors/sql/#known-issues-and-limitations). + > + > The SQL Server connector has a stored procedure timeout limit that's [less than 2 minutes](/connectors/sql/#known-issues-and-limitations). > Some stored procedures might take longer than this limit to complete, causing a `504 Timeout` error. You can work around this problem > by using a SQL completion trigger, native SQL pass-through query, a state table, and server-side jobs. > @@ -404,7 +404,7 @@ Sometimes, you have to work with result sets so large that the connector doesn't > [SQL Server on premises](/sql/sql-server/sql-server-technical-documentation) > and [SQL Managed Instance](/azure/azure-sql/managed-instance/sql-managed-instance-paas-overview), > you can use the [SQL Server Agent](/sql/ssms/agent/sql-server-agent). 
To learn more, see - > [Handle long-running stored procedure timeouts in the SQL connector for Azure Logic Apps](../logic-apps/handle-long-running-stored-procedures-sql-connector.md). + > [Handle long-running stored procedure timeouts in the SQL Server connector for Azure Logic Apps](../logic-apps/handle-long-running-stored-procedures-sql-connector.md). ### Handle dynamic bulk data @@ -414,7 +414,7 @@ When you call a stored procedure by using the SQL Server connector, the returned 1. View the output format by performing a test run. Copy and save your sample output. -1. In the designer, under the action where you call the stored procedure, select **New step**. +1. In the designer, under the action where you call the stored procedure, add a new action. 1. In the **Choose an operation** box, find and select the action named [**Parse JSON**](../logic-apps/logic-apps-perform-data-operations.md#parse-json-action). @@ -423,10 +423,13 @@ When you call a stored procedure by using the SQL Server connector, the returned 1. In the **Enter or paste a sample JSON payload** box, paste your sample output, and select **Done**. > [!NOTE] - > If you get an error that Logic Apps can't generate a schema, check that your sample output's syntax is correctly formatted. - > If you still can't generate the schema, in the **Schema** box, manually enter the schema. + > + > If you get an error that Azure Logic Apps can't generate a schema, + > check that your sample output's syntax is correctly formatted. + > If you still can't generate the schema, in the **Schema** box, + > manually enter the schema. -1. On the designer toolbar, select **Save**. +1. When you're done, save your workflow. 1. To reference the JSON content properties, click inside the edit boxes where you want to reference those properties so that the dynamic content list appears. 
In the list, under the [**Parse JSON**](../logic-apps/logic-apps-perform-data-operations.md#parse-json-action) heading, select the data tokens for the JSON content properties that you want. @@ -436,7 +439,7 @@ When you call a stored procedure by using the SQL Server connector, the returned ### Connection problems -Connection problems can commonly happen, so to troubleshoot and resolve these kinds of issues, review [Solving connectivity errors to SQL Server](https://support.microsoft.com/help/4009936/solving-connectivity-errors-to-sql-server). Here are some examples: +Connection problems can commonly happen, so to troubleshoot and resolve these kinds of issues, review [Solving connectivity errors to SQL Server](https://support.microsoft.com/help/4009936/solving-connectivity-errors-to-sql-server). The following list provides some examples: * **A network-related or instance-specific error occurred while establishing a connection to SQL Server. The server was not found or was not accessible. 
Verify that the instance name is correct and that SQL Server is configured to allow remote connections.** diff --git a/articles/connectors/connectors-native-recurrence.md b/articles/connectors/connectors-native-recurrence.md index 55534914a6de..e6b6e2165c0f 100644 --- a/articles/connectors/connectors-native-recurrence.md +++ b/articles/connectors/connectors-native-recurrence.md @@ -5,7 +5,7 @@ services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 01/24/2022 +ms.date: 05/27/2022 --- # Create, schedule, and run recurring tasks and workflows with the Recurrence trigger in Azure Logic Apps @@ -55,12 +55,14 @@ For differences between this trigger and the Sliding Window trigger or for more |||||| > [!IMPORTANT] - > If you use the **Day** or **Week** frequency and specify a future date and time, make sure that you set up the recurrence in advance: + > If you use the **Day**, **Week**, or **Month** frequency, and you specify a future date and time, make sure that you set up the recurrence in advance: > > * **Day**: Set up the daily recurrence at least 24 hours in advance. > > * **Week**: Set up the weekly recurrence at least 7 days in advance. > + > * **Month**: Set up the monthly recurrence at least one month in advance. + > > Otherwise, the workflow might skip the first recurrence. 
> > If a recurrence doesn't specify a specific [start date and time](../logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md#start-time), the first recurrence runs immediately diff --git a/articles/connectors/managed.md b/articles/connectors/managed.md index bf05ff259b5d..f2e45fa3bf55 100644 --- a/articles/connectors/managed.md +++ b/articles/connectors/managed.md @@ -584,7 +584,7 @@ For more information, see these topics: [youtube-icon]: ./media/apis-list/youtube.png -[apache-impala-doc]: /connectors/azureimpala/ "Connect to your Impala database to read data from tables" +[apache-impala-doc]: /connectors/impala/ "Connect to your Impala database to read data from tables" [azure-automation-doc]: /connectors/azureautomation/ "Create and manage automation jobs for your cloud and on-premises infrastructure" [azure-blob-storage-doc]: ./connectors-create-api-azureblobstorage.md "Manage files in your blob container with Azure blob storage connector" [azure-cosmos-db-doc]: ./connectors-create-api-cosmos-db.md "Connect to Azure Cosmos DB so that you can access and manage Azure Cosmos DB documents" @@ -646,4 +646,4 @@ For more information, see these topics: [x12-encode-doc]: ../logic-apps/logic-apps-enterprise-integration-X12-encode.md "Encode messages that use the X12 protocol" -[gateway-doc]: ../logic-apps/logic-apps-gateway-connection.md "Connect to data sources on-premises from logic apps with on-premises data gateway" \ No newline at end of file +[gateway-doc]: ../logic-apps/logic-apps-gateway-connection.md "Connect to data sources on-premises from logic apps with on-premises data gateway" diff --git a/articles/container-apps/quickstart-portal.md b/articles/container-apps/quickstart-portal.md index 278dab8ebcfd..19a646c5a2bd 100644 --- a/articles/container-apps/quickstart-portal.md +++ b/articles/container-apps/quickstart-portal.md @@ -18,7 +18,7 @@ In this quickstart, you create a secure Container Apps environment and deploy yo ## Prerequisites -An Azure 
account with an active subscription is required. If you don't already have one, you can [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +An Azure account with an active subscription is required. If you don't already have one, you can [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). Also, please make sure to have the Resource Provider "Microsoft.App" registered. ## Setup diff --git a/articles/container-registry/buffer-gate-public-content.md b/articles/container-registry/buffer-gate-public-content.md index 65b396bcef68..b3e8414fbd1b 100644 --- a/articles/container-registry/buffer-gate-public-content.md +++ b/articles/container-registry/buffer-gate-public-content.md @@ -63,15 +63,14 @@ For details, see [Docker Hub authenticated pulls on App Service](https://azure.g To begin managing copies of public images, you can create an Azure container registry if you don't already have one. Create a registry using the [Azure CLI](container-registry-get-started-azure-cli.md), [Azure portal](container-registry-get-started-portal.md), [Azure PowerShell](container-registry-get-started-powershell.md), or other tools. +# [Azure CLI](#tab/azure-cli) + As a recommended one-time step, [import](container-registry-import-images.md) base images and other public content to your Azure container registry. The [az acr import](/cli/azure/acr#az-acr-import) command in the Azure CLI supports image import from public registries such as Docker Hub and Microsoft Container Registry and from other private container registries. `az acr import` doesn't require a local Docker installation. You can run it with a local installation of the Azure CLI or directly in Azure Cloud Shell. It supports images of any OS type, multi-architecture images, or OCI artifacts such as Helm charts. Depending on your organization's needs, you can import to a dedicated registry or a repository in a shared registry. 
-# [Azure CLI](#tab/azure-cli) -Example: - ```azurecli-interactive az acr import \ --name myregistry \ @@ -81,18 +80,28 @@ az acr import \ --password ``` -# [PowerShell](#tab/azure-powershell) -Example: +# [Azure PowerShell](#tab/azure-powershell) + +As a recommended one-time step, [import](container-registry-import-images.md) base images and other public content to your Azure container registry. The [Import-AzContainerRegistryImage](/powershell/module/az.containerregistry/import-azcontainerregistryimage) command in the Azure PowerShell supports image import from public registries such as Docker Hub and Microsoft Container Registry and from other private container registries. + +`Import-AzContainerRegistryImage` doesn't require a local Docker installation. You can run it with a local installation of the Azure PowerShell or directly in Azure Cloud Shell. It supports images of any OS type, multi-architecture images, or OCI artifacts such as Helm charts. + +Depending on your organization's needs, you can import to a dedicated registry or a repository in a shared registry. ```azurepowershell-interactive -Import-AzContainerRegistryImage - -SourceImage library/busybox:latest - -ResourceGroupName $resourceGroupName - -RegistryName $RegistryName - -SourceRegistryUri docker.io - -TargetTag busybox:latest +$Params = @{ + SourceImage = 'library/busybox:latest' + ResourceGroupName = $resourceGroupName + RegistryName = $RegistryName + SourceRegistryUri = 'docker.io' + TargetTag = 'busybox:latest' +} +Import-AzContainerRegistryImage @Params ``` - Credentials are required if the source registry is not available publicly or the admin user is disabled. + +Credentials are required if the source registry is not available publicly or the admin user is disabled. 
+ +--- ## Update image references diff --git a/articles/container-registry/container-registry-tasks-base-images.md b/articles/container-registry/container-registry-tasks-base-images.md index 7cf1dce4d50c..e060c0d25934 100644 --- a/articles/container-registry/container-registry-tasks-base-images.md +++ b/articles/container-registry/container-registry-tasks-base-images.md @@ -67,7 +67,7 @@ See the following tutorials for scenarios to automate application image builds a * [Automate container image builds when a base image is updated in the same registry](container-registry-tutorial-base-image-update.md) -* [Automate container image builds when a base image is updated in a different registry](container-registry-tutorial-base-image-update.md) +* [Automate container image builds when a base image is updated in a different registry](container-registry-tutorial-private-base-image-update.md) diff --git a/articles/cosmos-db/TOC.yml b/articles/cosmos-db/TOC.yml index e5036dbbf4b7..a2cb55da1af2 100644 --- a/articles/cosmos-db/TOC.yml +++ b/articles/cosmos-db/TOC.yml @@ -331,6 +331,10 @@ - name: Ternary and coalesce operators displayName: ternary, coalesce, operators href: sql/sql-query-ternary-coalesce-operators.md + - name: Bitwise operators + displayName: bitwise, binary, operators + href: sql/sql-query-bitwise-operators.md + - name: Functions items: - name: User-defined functions diff --git a/articles/cosmos-db/cassandra/migrate-data-databricks.md b/articles/cosmos-db/cassandra/migrate-data-databricks.md index 0a85cbd7fd8d..6b96938b3d56 100644 --- a/articles/cosmos-db/cassandra/migrate-data-databricks.md +++ b/articles/cosmos-db/cassandra/migrate-data-databricks.md @@ -52,6 +52,9 @@ Select **Install**, and then restart the cluster when installation is complete. > [!NOTE] > Make sure that you restart the Databricks cluster after the Cassandra Connector library has been installed. 
+> [!WARNING] +> The samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. + ## Create Scala Notebook for migration Create a Scala Notebook in Databricks. Replace your source and target Cassandra configurations with the corresponding credentials, and source and target keyspaces and tables. Then run the following code: diff --git a/articles/cosmos-db/cassandra/spark-create-operations.md b/articles/cosmos-db/cassandra/spark-create-operations.md index 4688987a17d4..bbbf4cb8cfdb 100644 --- a/articles/cosmos-db/cassandra/spark-create-operations.md +++ b/articles/cosmos-db/cassandra/spark-create-operations.md @@ -47,7 +47,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). +> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). 
+ +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. ## Dataframe API diff --git a/articles/cosmos-db/cassandra/spark-databricks.md b/articles/cosmos-db/cassandra/spark-databricks.md index 76abf81cac26..3aef582aea04 100644 --- a/articles/cosmos-db/cassandra/spark-databricks.md +++ b/articles/cosmos-db/cassandra/spark-databricks.md @@ -49,7 +49,10 @@ This article details how to work with Azure Cosmos DB Cassandra API from Spark o * **Azure Cosmos DB Cassandra API-specific library:** - If you are using Spark 2.x, a custom connection factory is required to configure the retry policy from the Cassandra Spark connector to Azure Cosmos DB Cassandra API. Add the `com.microsoft.azure.cosmosdb:azure-cosmos-cassandra-spark-helper:1.2.0`[maven coordinates](https://search.maven.org/artifact/com.microsoft.azure.cosmosdb/azure-cosmos-cassandra-spark-helper/1.2.0/jar) to attach the library to the cluster. > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB Cassandra API-specific library mentioned above. +> If you are using Spark 3.0, you do not need to install the Cosmos DB Cassandra API-specific library mentioned above. + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. 
## Sample notebooks diff --git a/articles/cosmos-db/cassandra/spark-ddl-operations.md b/articles/cosmos-db/cassandra/spark-ddl-operations.md index 2ccc90de47b1..2ecb2c565fff 100644 --- a/articles/cosmos-db/cassandra/spark-ddl-operations.md +++ b/articles/cosmos-db/cassandra/spark-ddl-operations.md @@ -51,7 +51,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). +> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. ## Keyspace DDL operations diff --git a/articles/cosmos-db/cassandra/spark-delete-operation.md b/articles/cosmos-db/cassandra/spark-delete-operation.md index 7c0624f7e29f..a10696a26abd 100644 --- a/articles/cosmos-db/cassandra/spark-delete-operation.md +++ b/articles/cosmos-db/cassandra/spark-delete-operation.md @@ -47,7 +47,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. 
Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). However, when using operations that require spark context (for example, `CassandraConnector(sc)` for `delete` as shown below), connection properties need to be defined at the cluster level. +> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). However, when using operations that require spark context (for example, `CassandraConnector(sc)` for `delete` as shown below), connection properties need to be defined at the cluster level. + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. ## Sample data generator We will use this code fragment to generate sample data: diff --git a/articles/cosmos-db/cassandra/spark-read-operation.md b/articles/cosmos-db/cassandra/spark-read-operation.md index 2cef5683534a..ea55d94fba7b 100644 --- a/articles/cosmos-db/cassandra/spark-read-operation.md +++ b/articles/cosmos-db/cassandra/spark-read-operation.md @@ -48,7 +48,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. 
You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector(see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). +> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector(see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. ## Dataframe API diff --git a/articles/cosmos-db/cassandra/spark-table-copy-operations.md b/articles/cosmos-db/cassandra/spark-table-copy-operations.md index c51495557555..e3760a48b957 100644 --- a/articles/cosmos-db/cassandra/spark-table-copy-operations.md +++ b/articles/cosmos-db/cassandra/spark-table-copy-operations.md @@ -49,6 +49,9 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") > [!NOTE] > If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. 
Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. + ## Insert sample data ```scala val booksDF = Seq( diff --git a/articles/cosmos-db/cassandra/spark-upsert-operations.md b/articles/cosmos-db/cassandra/spark-upsert-operations.md index 1ec329b2e20c..60d11188f148 100644 --- a/articles/cosmos-db/cassandra/spark-upsert-operations.md +++ b/articles/cosmos-db/cassandra/spark-upsert-operations.md @@ -47,7 +47,10 @@ spark.conf.set("spark.cassandra.connection.keep_alive_ms", "600000000") ``` > [!NOTE] -> If you are using Spark 3.0 or higher, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). However, when using operations that require spark context (for example, `CassandraConnector(sc)` for `update` as shown below), connection properties need to be defined at the cluster level. +> If you are using Spark 3.0, you do not need to install the Cosmos DB helper and connection factory. You should also use `remoteConnectionsPerExecutor` instead of `connections_per_executor_max` for the Spark 3 connector (see above). You will see that connection related properties are defined within the notebook above. 
Using the syntax below, connection properties can be defined in this manner without needing to be defined at the cluster level (Spark context initialization). However, when using operations that require spark context (for example, `CassandraConnector(sc)` for `update` as shown below), connection properties need to be defined at the cluster level. + +> [!WARNING] +> The Spark 3 samples shown in this article have been tested with Spark **version 3.0.1** and the corresponding Cassandra Spark Connector **com.datastax.spark:spark-cassandra-connector-assembly_2.12:3.0.0**. Later versions of Spark and/or the Cassandra connector may not function as expected. ## Dataframe API diff --git a/articles/cosmos-db/sql/sql-query-bitwise-operators.md b/articles/cosmos-db/sql/sql-query-bitwise-operators.md new file mode 100644 index 000000000000..7a64c53fe3df --- /dev/null +++ b/articles/cosmos-db/sql/sql-query-bitwise-operators.md @@ -0,0 +1,67 @@ +--- +title: Bitwise operators in Azure Cosmos DB +description: Learn about SQL bitwise operators supported by Azure Cosmos DB. +author: seesharprun +ms.author: sidandrews +ms.reviewer: jucocchi +ms.service: cosmos-db +ms.subservice: cosmosdb-sql +ms.topic: conceptual +ms.date: 05/31/2022 +--- + +# Bitwise operators in Azure Cosmos DB +[!INCLUDE[appliesto-sql-api](../includes/appliesto-sql-api.md)] + + +This article details the bitwise operators supported by Azure Cosmos DB. Bitwise operators are useful for constructing JSON result-sets on the fly. The bitwise operators work similarly to higher-level programming languages like C# and JavaScript. For examples of C# bitwise operators, see [Bitwise and shift operators](/dotnet/csharp/language-reference/operators/bitwise-and-shift-operators). + +## Understanding bitwise operations + +The following table shows the explanations and examples of bitwise operations in the SQL API between two values. 
+ +| Operation | Operator | Description | +| --- | --- | --- | +| **Left shift** | ``<<`` | Shift left-hand value *left* by the specified number of bits. | +| **Right shift** | ``>>`` | Shift left-hand value *right* by the specified number of bits. | +| **Zero-fill (unsigned) right shift** | ``>>>`` | Shift left-hand value *right* by the specified number of bits, filling the left-most bits with zeros. | +| **AND** | ``&`` | Computes bitwise logical AND. | +| **OR** | ``\|`` | Computes bitwise logical OR. | +| **XOR** | ``^`` | Computes bitwise logical exclusive OR. | + + +For example, the following query uses each of the bitwise operators and renders a result. + +```sql +SELECT + (100 >> 2) AS rightShift, + (100 << 2) AS leftShift, + (100 >>> 0) AS zeroFillRightShift, + (100 & 1000) AS logicalAnd, + (100 | 1000) AS logicalOr, + (100 ^ 1000) AS logicalExclusiveOr +``` + +The example query returns the following results as a JSON array. + +```json +[ + { + "rightShift": 25, + "leftShift": 400, + "zeroFillRightShift": 100, + "logicalAnd": 96, + "logicalOr": 1004, + "logicalExclusiveOr": 908 + } +] +``` + +> [!IMPORTANT] +> In this example, the values on the left and right side of the operands are 32-bit integer values. + +## Next steps + +- [Azure Cosmos DB .NET samples](https://github.com/Azure/azure-cosmos-dotnet-v3) +- [Keywords](sql-query-keywords.md) +- [SELECT clause](sql-query-select.md) diff --git a/articles/cost-management-billing/cost-management-billing-faq.yml b/articles/cost-management-billing/cost-management-billing-faq.yml index 3d53fecbad6a..54e125a97a7b 100644 --- a/articles/cost-management-billing/cost-management-billing-faq.yml +++ b/articles/cost-management-billing/cost-management-billing-faq.yml @@ -1,7 +1,7 @@ ### YamlMime:FAQ metadata: title: Cost Management + Billing frequently asked questions (FAQ) - titleSuffix: Azure Cost Management + Billing + titleSuffix: Microsoft Cost Management description: Frequently asked questions and answers.
author: bandersmsft ms.reviewer: adwise diff --git a/articles/cost-management-billing/cost-management-billing-overview.md b/articles/cost-management-billing/cost-management-billing-overview.md index 42a6e955e53b..751b82a98177 100644 --- a/articles/cost-management-billing/cost-management-billing-overview.md +++ b/articles/cost-management-billing/cost-management-billing-overview.md @@ -1,6 +1,6 @@ --- title: Overview of Cost Management + Billing -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: You use Cost Management + Billing features to conduct billing administrative tasks and manage billing access to costs. You also use the features to monitor and control Azure spending and to optimize Azure resource use. keywords: author: bandersmsft diff --git a/articles/cost-management-billing/costs/assign-access-acm-data.md b/articles/cost-management-billing/costs/assign-access-acm-data.md index 454ee54128b9..183faadc016c 100644 --- a/articles/cost-management-billing/costs/assign-access-acm-data.md +++ b/articles/cost-management-billing/costs/assign-access-acm-data.md @@ -1,6 +1,6 @@ --- title: Assign access to Cost Management data -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article walks you though assigning permission to Cost Management data for various access scopes. 
author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/aws-integration-manage.md b/articles/cost-management-billing/costs/aws-integration-manage.md index 454ba3c765f8..939813fc988e 100644 --- a/articles/cost-management-billing/costs/aws-integration-manage.md +++ b/articles/cost-management-billing/costs/aws-integration-manage.md @@ -1,6 +1,6 @@ --- title: Manage AWS costs and usage in Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you understand how to use cost analysis and budgets in Cost Management to manage your AWS costs and usage. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/aws-integration-set-up-configure.md b/articles/cost-management-billing/costs/aws-integration-set-up-configure.md index e1aa0bfa205c..e71c00db3cec 100644 --- a/articles/cost-management-billing/costs/aws-integration-set-up-configure.md +++ b/articles/cost-management-billing/costs/aws-integration-set-up-configure.md @@ -1,6 +1,6 @@ --- title: Set up AWS integration with Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article walks you through setting up and configuring AWS Cost and Usage report integration with Cost Management. 
author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/cost-analysis-built-in-views.md b/articles/cost-management-billing/costs/cost-analysis-built-in-views.md index 39871921ae4a..665589fc77c1 100644 --- a/articles/cost-management-billing/costs/cost-analysis-built-in-views.md +++ b/articles/cost-management-billing/costs/cost-analysis-built-in-views.md @@ -1,6 +1,6 @@ --- title: Use built-in views in Cost analysis -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you understand when to use which view, how each one provides unique insights about your costs and recommended next steps to investigate further. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/cost-analysis-common-uses.md b/articles/cost-management-billing/costs/cost-analysis-common-uses.md index 8c82a59e820f..66f0d7f0c935 100644 --- a/articles/cost-management-billing/costs/cost-analysis-common-uses.md +++ b/articles/cost-management-billing/costs/cost-analysis-common-uses.md @@ -1,6 +1,6 @@ --- title: Common cost analysis uses in Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article explains how you can get results for common cost analysis tasks in Cost Management. 
author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/cost-management-error-codes.md b/articles/cost-management-billing/costs/cost-management-error-codes.md index 26ae22393c96..29a5c5279ea7 100644 --- a/articles/cost-management-billing/costs/cost-management-error-codes.md +++ b/articles/cost-management-billing/costs/cost-management-error-codes.md @@ -1,6 +1,6 @@ --- title: Troubleshoot common Cost Management errors -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article describes common Cost Management errors and provides information about solutions. author: bandersmsft ms.reviewer: micflan diff --git a/articles/cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md b/articles/cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md index 5a4757d26cf5..2eeb5f1f5ee1 100644 --- a/articles/cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md +++ b/articles/cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md @@ -1,6 +1,6 @@ --- title: Monitor usage and spending with cost alerts in Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article describes how cost alerts help you monitor usage and spending in Cost Management. 
author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/cost-mgt-best-practices.md b/articles/cost-management-billing/costs/cost-mgt-best-practices.md index f4d1c139c17f..276c93688c9b 100644 --- a/articles/cost-management-billing/costs/cost-mgt-best-practices.md +++ b/articles/cost-management-billing/costs/cost-mgt-best-practices.md @@ -1,6 +1,6 @@ --- title: Optimize your cloud investment with Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps get the most value out of your cloud investments, reduce your costs, and evaluate where your money is being spent. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/get-started-partners.md b/articles/cost-management-billing/costs/get-started-partners.md index 46707a6fa0e4..d1499a99a85f 100644 --- a/articles/cost-management-billing/costs/get-started-partners.md +++ b/articles/cost-management-billing/costs/get-started-partners.md @@ -1,6 +1,6 @@ --- title: Get started with Cost Management for partners -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article explains how partners use Cost Management features and how they enable access for their customers. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/group-filter.md b/articles/cost-management-billing/costs/group-filter.md index bc2367fe58f1..62a0885121ea 100644 --- a/articles/cost-management-billing/costs/group-filter.md +++ b/articles/cost-management-billing/costs/group-filter.md @@ -1,6 +1,6 @@ --- title: Group and filter options in Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article explains how to use group and filter options in Cost Management. 
author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/ingest-azure-usage-at-scale.md b/articles/cost-management-billing/costs/ingest-azure-usage-at-scale.md index 2c49b55b5e64..db9cd874e63a 100644 --- a/articles/cost-management-billing/costs/ingest-azure-usage-at-scale.md +++ b/articles/cost-management-billing/costs/ingest-azure-usage-at-scale.md @@ -1,6 +1,6 @@ --- title: Retrieve large cost datasets recurringly with exports from Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you regularly export large amounts of data with exports from Cost Management. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/reporting-get-started.md b/articles/cost-management-billing/costs/reporting-get-started.md index 046f9112d40c..cd9936d616d4 100644 --- a/articles/cost-management-billing/costs/reporting-get-started.md +++ b/articles/cost-management-billing/costs/reporting-get-started.md @@ -65,5 +65,5 @@ For more information about credits, see [Track Microsoft Customer Agreement Azur - [Explore and analyze costs with cost analysis](quick-acm-cost-analysis.md). - [Analyze Azure costs with the Power BI App](analyze-cost-data-azure-cost-management-power-bi-template-app.md). -- [Connect to Azure Cost Management data in Power BI Desktop](/power-bi/connect-data/desktop-connect-azure-cost-management). +- [Connect to Microsoft Cost Management data in Power BI Desktop](/power-bi/connect-data/desktop-connect-azure-cost-management). - [Create and manage exported data](tutorial-export-acm-data.md). 
\ No newline at end of file diff --git a/articles/cost-management-billing/costs/save-share-views.md b/articles/cost-management-billing/costs/save-share-views.md index fd307690a69d..8a513a9039aa 100644 --- a/articles/cost-management-billing/costs/save-share-views.md +++ b/articles/cost-management-billing/costs/save-share-views.md @@ -1,6 +1,6 @@ --- title: Save and share customized views -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article explains how to save and share a customized view with others. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/tutorial-export-acm-data.md b/articles/cost-management-billing/costs/tutorial-export-acm-data.md index 389ad752a27e..42d318210004 100644 --- a/articles/cost-management-billing/costs/tutorial-export-acm-data.md +++ b/articles/cost-management-billing/costs/tutorial-export-acm-data.md @@ -1,6 +1,6 @@ --- title: Tutorial - Create and manage exported data from Cost Management -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article shows you how you can create and manage exported Cost Management data so that you can use it in external systems. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/understand-cost-mgt-data.md b/articles/cost-management-billing/costs/understand-cost-mgt-data.md index 42f2059a7f9a..54db5ed4f80f 100644 --- a/articles/cost-management-billing/costs/understand-cost-mgt-data.md +++ b/articles/cost-management-billing/costs/understand-cost-mgt-data.md @@ -1,6 +1,6 @@ --- title: Understand Cost Management data -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you better understand data that's included in Cost Management and how frequently it's processed, collected, shown, and closed. 
author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/costs/understand-work-scopes.md b/articles/cost-management-billing/costs/understand-work-scopes.md index b74072f6e294..02c9b039596a 100644 --- a/articles/cost-management-billing/costs/understand-work-scopes.md +++ b/articles/cost-management-billing/costs/understand-work-scopes.md @@ -1,6 +1,6 @@ --- title: Understand and work with Cost Management scopes -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you understand billing and resource management scopes available in Azure and how to use the scopes in Cost Management and APIs. author: bandersmsft ms.author: banders diff --git a/articles/cost-management-billing/index.yml b/articles/cost-management-billing/index.yml index bc75cce295f0..156372581c27 100644 --- a/articles/cost-management-billing/index.yml +++ b/articles/cost-management-billing/index.yml @@ -4,7 +4,7 @@ title: Cost Management + Billing documentation summary: Cost Management + Billing helps you understand your Azure invoice (bill), manage your billing account and subscriptions, monitor and control Azure spending and optimize resource use. Learn how to analyze costs, create and manage budgets, export data, and review and act on recommendations. metadata: title: Cost Management + Billing - titleSuffix: Azure Cost Management + Billing documentation + titleSuffix: Microsoft Cost Management description: Cost Management + Billing helps you understand your Azure invoice (bill), manage your billing account and subscriptions, monitor and control Azure spending and optimize resource use. Learn how to analyze costs, create and manage budgets, export data, and review and act on recommendations. 
ms.service: cost-management-billing ms.subservice: common diff --git a/articles/cost-management-billing/manage/elevate-access-global-admin.md b/articles/cost-management-billing/manage/elevate-access-global-admin.md index 2757d963ad78..600ec0766e3a 100644 --- a/articles/cost-management-billing/manage/elevate-access-global-admin.md +++ b/articles/cost-management-billing/manage/elevate-access-global-admin.md @@ -1,6 +1,6 @@ --- title: Elevate access to manage billing accounts -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: Describes how to elevate access for a Global Administrator to manage billing accounts using the Azure portal or REST API. author: bandersmsft ms.reviewer: amberb diff --git a/articles/cost-management-billing/reservations/reservation-amortization.md b/articles/cost-management-billing/reservations/reservation-amortization.md index 0f5f4063d602..5f984be06028 100644 --- a/articles/cost-management-billing/reservations/reservation-amortization.md +++ b/articles/cost-management-billing/reservations/reservation-amortization.md @@ -1,6 +1,6 @@ --- title: View amortized reservation costs -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: This article helps you understand what amortized reservation costs are and how to view them in cost analysis. 
author: bandersmsft ms.reviewer: primittal diff --git a/articles/cost-management-billing/understand/analyze-unexpected-charges.md b/articles/cost-management-billing/understand/analyze-unexpected-charges.md index c4c4f7dfea07..03d2ecfca4b1 100644 --- a/articles/cost-management-billing/understand/analyze-unexpected-charges.md +++ b/articles/cost-management-billing/understand/analyze-unexpected-charges.md @@ -1,6 +1,6 @@ --- title: Identify anomalies and unexpected changes in cost -titleSuffix: Azure Cost Management + Billing +titleSuffix: Microsoft Cost Management description: Learn how to identify anomalies and unexpected changes in cost. author: bandersmsft ms.reviewer: micflan diff --git a/articles/data-factory/connector-dynamics-crm-office-365.md b/articles/data-factory/connector-dynamics-crm-office-365.md index f01b9bd0665b..7ded58f0ac44 100644 --- a/articles/data-factory/connector-dynamics-crm-office-365.md +++ b/articles/data-factory/connector-dynamics-crm-office-365.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.author: jianleishen author: jianleishen ms.custom: synapse -ms.date: 04/12/2022 +ms.date: 04/24/2022 --- # Copy and transform data in Dynamics 365 (Microsoft Dataverse) or Dynamics CRM using Azure Data Factory or Azure Synapse Analytics @@ -504,7 +504,7 @@ If all of your source records map to the same target entity and your source data ## Mapping data flow properties -When transforming data in mapping data flow, you can read and write to tables from Dynamics. For more information, see the [source transformation](data-flow-source.md) and [sink transformation](data-flow-sink.md) in mapping data flows. You can choose to use a Dynamics dataset or an [inline dataset](data-flow-source.md#inline-datasets) as source and sink type. +When transforming data in mapping data flow, you can read from and write to tables in Dynamics. 
For more information, see the [source transformation](data-flow-source.md) and [sink transformation](data-flow-sink.md) in mapping data flows. You can choose to use a Dynamics dataset or an [inline dataset](data-flow-source.md#inline-datasets) as source and sink type. ### Source transformation @@ -512,34 +512,32 @@ The below table lists the properties supported by Dynamics. You can edit these p | Name | Description | Required | Allowed values | Data flow script property | | ---- | ----------- | -------- | -------------- | ---------------- | -| Table | If you select Table as input, data flow fetches all the data from the table specified in the dataset. | No | - | tableName | +| Entity name| The logical name of the entity to retrieve. | Yes when use inline dataset | - | *(for inline dataset only)*
    entity | | Query |FetchXML is a proprietary query language that is used in Dynamics online and on-premises. See the following example. To learn more, see [Build queries with FetchXML](/previous-versions/dynamicscrm-2016/developers-guide/gg328332(v=crm.8)). | No | String | query | -| Entity | The logical name of the entity to retrieve. | Yes when use inline mode | - | entity| > [!Note] > If you select **Query** as input type, the column type from tables can not be retrieved. It will be treated as string by default. #### Dynamics source script example -When you use Dynamics as source type, the associated data flow script is: +When you use Dynamics dataset as source type, the associated data flow script is: ``` -source( - output( - new_name as string, - new_dataflowtestid as string - ), - store: 'dynamics', - format: 'dynamicsformat', - baseUrl: $baseUrl, - cloudType:'AzurePublic', - servicePrincipalId:$servicePrincipalId, - servicePrincipalCredential:$servicePrincipalCredential, - entity:'new_datalowtest' -query:' ' - ) ~> movies +source(allowSchemaDrift: true, + validateSchema: false, + query: '') ~> DynamicsSource +``` + +If you use inline dataset, the associated data flow script is: ``` +source(allowSchemaDrift: true, + validateSchema: false, + store: 'dynamics', + format: 'dynamicsformat', + entity: 'Entity1', + query: '') ~> DynamicsSource +``` ### Sink transformation @@ -547,39 +545,41 @@ The below table lists the properties supported by Dynamics sink. You can edit th | Name | Description | Required | Allowed values | Data flow script property | | ---- | ----------- | -------- | -------------- | ---------------- | -| Entity | The logical name of the entity to retrieve. | Yes when use inline mode | - | entity| -| Request interval | The interval time between API requests in millisecond. | No | - | requestInterval| -| Update method | Specify what operations are allowed on your database destination. The default is to only allow inserts.
    To update, upsert, or delete rows, an [Alter row transformation](data-flow-alter-row.md) is required to tag rows for those actions. | Yes | `true` or `false` | insertable
    updateable
    upsertable
    deletable| | Alternate key name | The alternate key name defined on your entity to do an update, upsert or delete. | No | - | alternateKeyName | +| Update method | Specify what operations are allowed on your database destination. The default is to only allow inserts.
    To update, upsert, or delete rows, an [Alter row transformation](data-flow-alter-row.md) is required to tag rows for those actions. | Yes | `true` or `false` | insertable
    updateable
    upsertable
    deletable| +| Entity name| The logical name of the entity to write. | Yes when use inline dataset | - | *(for inline dataset only)*
    entity| + #### Dynamics sink script example -When you use Dynamics as sink type, the associated data flow script is: +When you use Dynamics dataset as sink type, the associated data flow script is: ``` -moviesAltered sink( - input(new_name as string, - new_id as string, - new_releasedate as string - ), - store: 'dynamics', - format: 'dynamicsformat', - baseUrl: $baseUrl, - - cloudType:'AzurePublic', - servicePrincipalId:$servicePrincipalId, - servicePrincipalCredential:$servicePrincipalCredential, - updateable: true, - upsertable: true, - insertable: true, - deletable:true, - alternateKey:'new_testalternatekey', - entity:'new_dataflow_crud_test', - -requestInterval:1000 - ) ~> movieDB +IncomingStream sink(allowSchemaDrift: true, + validateSchema: false, + deletable:true, + insertable:true, + updateable:true, + upsertable:true, + skipDuplicateMapInputs: true, + skipDuplicateMapOutputs: true) ~> DynamicsSink ``` +If you use inline dataset, the associated data flow script is: + +``` +IncomingStream sink(allowSchemaDrift: true, + validateSchema: false, + store: 'dynamics', + format: 'dynamicsformat', + entity: 'Entity1', + deletable: true, + insertable: true, + updateable: true, + upsertable: true, + skipDuplicateMapInputs: true, + skipDuplicateMapOutputs: true) ~> DynamicsSink +``` ## Lookup activity properties To learn details about the properties, see [Lookup activity](control-flow-lookup-activity.md). 
diff --git a/articles/data-factory/connector-google-adwords.md b/articles/data-factory/connector-google-adwords.md index 2e76bbbdc019..e140118ec7b0 100644 --- a/articles/data-factory/connector-google-adwords.md +++ b/articles/data-factory/connector-google-adwords.md @@ -8,7 +8,7 @@ ms.service: data-factory ms.subservice: data-movement ms.topic: conceptual ms.custom: synapse -ms.date: 02/24/2022 +ms.date: 05/30/2022 --- # Copy data from Google AdWords using Azure Data Factory or Synapse Analytics @@ -81,7 +81,7 @@ The following properties are supported for Google AdWords linked service: | clientId | The client ID of the Google application used to acquire the refresh token. You can choose to mark this field as a SecureString to store it securely, or store password in Azure Key Vault and let the copy activity pull from there when performing data copy - learn more from [Store credentials in Key Vault](store-credentials-in-key-vault.md). | No | | clientSecret | The client secret of the google application used to acquire the refresh token. You can choose to mark this field as a SecureString to store it securely, or store password in Azure Key Vault and let the copy activity pull from there when performing data copy - learn more from [Store credentials in Key Vault](store-credentials-in-key-vault.md). | No | | email | The service account email ID that is used for ServiceAuthentication and can only be used on self-hosted IR. | No | -| keyFilePath | The full path to the .p12 key file that is used to authenticate the service account email address and can only be used on self-hosted IR. | No | +| keyFilePath | The full path to the `.p12` or `.json` key file that is used to authenticate the service account email address and can only be used on self-hosted IR. | No | | trustedCertPath | The full path of the .pem file containing trusted CA certificates for verifying the server when connecting over TLS. This property can only be set when using TLS on self-hosted IR. 
The default value is the cacerts.pem file installed with the IR. | No | | useSystemTrustStore | Specifies whether to use a CA certificate from the system trust store or from a specified PEM file. The default value is false. | No | diff --git a/articles/data-factory/connector-google-bigquery.md b/articles/data-factory/connector-google-bigquery.md index 9157cd04e10b..068caf3db1ca 100644 --- a/articles/data-factory/connector-google-bigquery.md +++ b/articles/data-factory/connector-google-bigquery.md @@ -8,7 +8,7 @@ ms.service: data-factory ms.subservice: data-movement ms.topic: conceptual ms.custom: synapse -ms.date: 04/26/2022 +ms.date: 05/30/2022 --- # Copy data from Google BigQuery using Azure Data Factory or Synapse Analytics @@ -117,7 +117,7 @@ Set "authenticationType" property to **ServiceAuthentication**, and specify the | Property | Description | Required | |:--- |:--- |:--- | | email | The service account email ID that is used for ServiceAuthentication. It can be used only on Self-hosted Integration Runtime. | No | -| keyFilePath | The full path to the .p12 key file that is used to authenticate the service account email address. | No | +| keyFilePath | The full path to the `.p12` or `.json` key file that is used to authenticate the service account email address. | No | | trustedCertPath | The full path of the .pem file that contains trusted CA certificates used to verify the server when you connect over TLS. This property can be set only when you use TLS on Self-hosted Integration Runtime. The default value is the cacerts.pem file installed with the integration runtime. | No | | useSystemTrustStore | Specifies whether to use a CA certificate from the system trust store or from a specified .pem file. The default value is **false**. 
| No | @@ -133,7 +133,7 @@ Set "authenticationType" property to **ServiceAuthentication**, and specify the "requestGoogleDriveScope" : true, "authenticationType" : "ServiceAuthentication", "email": "", - "keyFilePath": "<.p12 key path on the IR machine>" + "keyFilePath": "<.p12 or .json key path on the IR machine>" }, "connectVia": { "referenceName": "", diff --git a/articles/defender-for-cloud/TOC.yml b/articles/defender-for-cloud/TOC.yml index 9953ccf10963..17f87e0479c2 100644 --- a/articles/defender-for-cloud/TOC.yml +++ b/articles/defender-for-cloud/TOC.yml @@ -232,6 +232,8 @@ - name: Overview of Defender for Containers displayName: kubernetes, aks, acr, registries, k8s, arc, hybrid, on-premises, azure arc, multicloud href: defender-for-containers-introduction.md + - name: How does Defender for Containers work? + href: defender-for-containers-architecture.md - name: Enable Defender for Containers displayName: kubernetes, aks, acr, registries, k8s, arc, hybrid, on-premises, azure arc, multicloud href: defender-for-containers-enable.md diff --git a/articles/defender-for-cloud/defender-for-cloud-introduction.md b/articles/defender-for-cloud/defender-for-cloud-introduction.md index ab27518fbabb..65937640f6ad 100644 --- a/articles/defender-for-cloud/defender-for-cloud-introduction.md +++ b/articles/defender-for-cloud/defender-for-cloud-introduction.md @@ -9,54 +9,48 @@ ms.date: 05/19/2022 --- # What is Microsoft Defender for Cloud? -Microsoft Defender for Cloud is a Cloud Workload Protection Platform (CWPP) that also delivers Cloud Security Posture Management (CSPM) for all of your Azure, on-premises, and multicloud (Amazon AWS and Google GCP) resources. - -- [**Defender for Cloud recommendations**](security-policy-concept.md) identify cloud workloads that require security actions and provide you with steps to protect your workloads from security risks. 
-- [**Defender for Cloud secure score**](secure-score-security-controls.md) gives you a clear view of your security posture based on the implementation of the security recommendations so you can track new security opportunities and precisely report on the progress of your security efforts. -- [**Defender for Cloud alerts**](alerts-overview.md) warn you about security events in your workloads in real-time, including the indicators that led to the event. - -Defender for Cloud fills three vital needs as you manage the security of your resources and workloads in the cloud and on-premises: +Microsoft Defender for Cloud is a Cloud Security Posture Management (CSPM) and Cloud Workload Protection Platform (CWPP) for all of your Azure, on-premises, and multi-cloud (Amazon AWS and Google GCP) resources. Defender for Cloud fills three vital needs as you manage the security of your resources and workloads in the cloud and on-premises: :::image type="content" source="media/defender-for-cloud-introduction/defender-for-cloud-synopsis.png" alt-text="Understanding the core functionality of Microsoft Defender for Cloud."::: -|Security requirement | Defender for Cloud solution| -|---------|---------| -|**Continuous assessment** - Understand your current security posture. | **Secure score** - A single score so that you can tell, at a glance, your current security situation: the higher the score, the lower the identified risk level. | -|**Secure** - Harden all connected resources and services. | **Security recommendations** - Customized and prioritized hardening tasks to improve your posture. You implement a recommendation by following the detailed remediation steps provided in the recommendation. For many recommendations, Defender for Cloud offers a "Fix" button for automated implementation!| -|**Defend** - Detect and resolve threats to those resources and services. 
| **Security alerts** - With the enhanced security features enabled, Defender for Cloud detects threats to your resources and workloads. These alerts appear in the Azure portal and Defender for Cloud can also send them by email to the relevant personnel in your organization. Alerts can also be streamed to SIEM, SOAR, or IT Service Management solutions as required. | +- [**Defender for Cloud secure score**](secure-score-security-controls.md) **continually assesses** your security posture so you can track new security opportunities and precisely report on the progress of your security efforts. +- [**Defender for Cloud recommendations**](security-policy-concept.md) **secures** your workloads with step-by-step actions that protect your workloads from known security risks. +- [**Defender for Cloud alerts**](alerts-overview.md) **defends** your workloads in real-time so you can react immediately and prevent security events from developing. + +For a step-by-step walkthrough of Defender for Cloud, check out this [interactive tutorial](https://mslearn.cloudguides.com/en-us/guides/Protect%20your%20multi-cloud%20environment%20with%20Microsoft%20Defender%20for%20Cloud). -## Posture management and workload protection +## Protect your resources and track your security progress -Microsoft Defender for Cloud's features covers the two broad pillars of cloud security: cloud security posture management and cloud workload protection. +Microsoft Defender for Cloud's features covers the two broad pillars of cloud security: Cloud Workload Protection Platform (CWPP) and Cloud Security Posture Management (CSPM). 
-### Cloud security posture management (CSPM) +### CSPM - Remediate security issues and watch your security posture improve In Defender for Cloud, the posture management features provide: -- **Visibility** - to help you understand your current security situation - **Hardening guidance** - to help you efficiently and effectively improve your security +- **Visibility** - to help you understand your current security situation -The central feature in Defender for Cloud that enables you to achieve those goals is **secure score**. Defender for Cloud continually assesses your resources, subscriptions, and organization for security issues. It then aggregates all the findings into a single score so that you can tell, at a glance, your current security situation: the higher the score, the lower the identified risk level. +Defender for Cloud continually assesses your resources, subscriptions, and organization for security issues and shows your security posture in **secure score**, an aggregated score of the security findings that tells you, at a glance, your current security situation: the higher the score, the lower the identified risk level. -When you open Defender for Cloud for the first time, it will meet the visibility and strengthening goals as follows: +As soon as you open Defender for Cloud for the first time, Defender for Cloud: -1. **Generate a secure score** for your subscriptions based on an assessment of your connected resources compared with the guidance in [Azure Security Benchmark](/security/benchmark/azure/overview). Use the score to understand your security posture, and the compliance dashboard to review your compliance with the built-in benchmark. When you've enabled the enhanced security features, you can customize the standards used to assess your compliance, and add other regulations (such as NIST and Azure CIS) or organization-specific security requirements. 
You can also apply recommendations, and score based on the AWS Foundational Security Best practices standards. +- **Generates a secure score** for your subscriptions based on an assessment of your connected resources compared with the guidance in [Azure Security Benchmark](/security/benchmark/azure/overview). Use the score to understand your security posture, and the compliance dashboard to review your compliance with the built-in benchmark. When you've enabled the enhanced security features, you can customize the standards used to assess your compliance, and add other regulations (such as NIST and Azure CIS) or organization-specific security requirements. You can also apply recommendations, and score based on the AWS Foundational Security Best practices standards. -1. **Provide hardening recommendations** based on any identified security misconfigurations and weaknesses. Use these security recommendations to strengthen the security posture of your organization's Azure, hybrid, and multicloud resources. +- **Provides hardening recommendations** based on any identified security misconfigurations and weaknesses. Use these security recommendations to strengthen the security posture of your organization's Azure, hybrid, and multi-cloud resources. [Learn more about secure score](secure-score-security-controls.md). -### Cloud workload protection (CWP) +### CWP - Identify unique workload security requirements -Defender for Cloud offers security alerts that are powered by [Microsoft Threat Intelligence](https://go.microsoft.com/fwlink/?linkid=2128684). It also includes a range of advanced, intelligent, protections for your workloads. The workload protections are provided through Microsoft Defender plans specific to the types of resources in your subscriptions. For example, you can enable **Microsoft Defender for Storage** to get alerted about suspicious activities related to your Azure Storage accounts. 
+Defender for Cloud offers security alerts that are powered by [Microsoft Threat Intelligence](https://go.microsoft.com/fwlink/?linkid=2128684). It also includes a range of advanced, intelligent, protections for your workloads. The workload protections are provided through Microsoft Defender plans specific to the types of resources in your subscriptions. For example, you can enable **Microsoft Defender for Storage** to get alerted about suspicious activities related to your storage resources. -## Azure, hybrid, and multicloud protections +## Protect all of your resources under one roof -Because Defender for Cloud is an Azure-native service, many Azure services are monitored and protected without needing any deployment. +Because Defender for Cloud is an Azure-native service, many Azure services are monitored and protected without needing any deployment, but you can also add resources that are on-premises or in other public clouds. When necessary, Defender for Cloud can automatically deploy a Log Analytics agent to gather security-related data. For Azure machines, deployment is handled directly. For hybrid and multicloud environments, Microsoft Defender plans are extended to non Azure machines with the help of [Azure Arc](https://azure.microsoft.com/services/azure-arc/). CSPM features are extended to multicloud machines without the need for any agents (see [Defend resources running on other clouds](#defend-resources-running-on-other-clouds)). -### Azure-native protections +### Defend your Azure-native resources Defender for Cloud helps you detect threats across: @@ -66,7 +60,7 @@ Defender for Cloud helps you detect threats across: - **Networks** - Defender for Cloud helps you limit exposure to brute force attacks. By reducing access to virtual machine ports, using the just-in-time VM access, you can harden your network by preventing unnecessary access. 
You can set secure access policies on selected ports, for only authorized users, allowed source IP address ranges or IP addresses, and for a limited amount of time. -### Defend your hybrid resources +### Defend your on-premises resources In addition to defending your Azure environment, you can add Defender for Cloud capabilities to your hybrid cloud environment to protect your non-Azure servers. To help you focus on what matters the most​, you'll get customized threat intelligence and prioritized alerts according to your specific environment. @@ -84,7 +78,7 @@ For example, if you've [connected an Amazon Web Services (AWS) account](quicksta Learn more about connecting your [AWS](quickstart-onboard-aws.md) and [GCP](quickstart-onboard-gcp.md) accounts to Microsoft Defender for Cloud. -## Vulnerability assessment and management +## Close vulnerabilities before they get exploited :::image type="content" source="media/defender-for-cloud-introduction/defender-for-cloud-expanded-assess.png" alt-text="Focus on the assessment features of Microsoft Defender for Cloud."::: @@ -99,7 +93,7 @@ Learn more on the following pages: - [Defender for Cloud's integrated Qualys scanner for Azure and hybrid machines](deploy-vulnerability-assessment-vm.md) - [Identify vulnerabilities in images in Azure container registries](defender-for-containers-usage.md#identify-vulnerabilities-in-images-in-other-container-registries) -## Optimize and improve security by configuring recommended controls +## Enforce your security policy from the top down :::image type="content" source="media/defender-for-cloud-introduction/defender-for-cloud-expanded-secure.png" alt-text="Focus on the 'secure' features of Microsoft Defender for Cloud."::: @@ -117,17 +111,15 @@ To help you understand how important each recommendation is to your overall secu :::image type="content" source="./media/defender-for-cloud-introduction/sc-secure-score.png" alt-text="Defender for Cloud secure score."::: -## Defend against threats 
+## Extend Defender for Cloud with Defender plans and external monitoring :::image type="content" source="media/defender-for-cloud-introduction/defender-for-cloud-expanded-defend.png" alt-text="Focus on the 'defend'' features of Microsoft Defender for Cloud."::: -Defender for Cloud provides: - -- **Security alerts** - When Defender for Cloud detects a threat in any area of your environment, it generates a security alert. These alerts describe details of the affected resources, suggested remediation steps, and in some cases an option to trigger a logic app in response. Whether an alert is generated by Defender for Cloud, or received by Defender for Cloud from an integrated security product, you can export it. To export your alerts to Microsoft Sentinel, any third-party SIEM, or any other external tool, follow the instructions in [Stream alerts to a SIEM, SOAR, or IT Service Management solution](export-to-siem.md). Defender for Cloud's threat protection includes fusion kill-chain analysis, which automatically correlates alerts in your environment based on cyber kill-chain analysis, to help you better understand the full story of an attack campaign, where it started and what kind of impact it had on your resources. [Defender for Cloud's supported kill chain intents are based on version 9 of the MITRE ATT&CK matrix](alerts-reference.md#intentions). +You can extend the Defender for Cloud protection with: - **Advanced threat protection features** for virtual machines, SQL databases, containers, web applications, your network, and more - Protections include securing the management ports of your VMs with [just-in-time access](just-in-time-access-overview.md), and [adaptive application controls](adaptive-application-controls.md) to create allowlists for what apps should and shouldn't run on your machines. 
-The **Defender plans** page of Microsoft Defender for Cloud offers the following plans for comprehensive defenses for the compute, data, and service layers of your environment: +The **Defender plans** of Microsoft Defender for Cloud offer comprehensive defenses for the compute, data, and service layers of your environment: - [Microsoft Defender for Servers](defender-for-servers-introduction.md) - [Microsoft Defender for Storage](defender-for-storage-introduction.md) @@ -145,6 +137,8 @@ Use the advanced protection tiles in the [workload protections dashboard](worklo > [!TIP] > Microsoft Defender for IoT is a separate product. You'll find all the details in [Introducing Microsoft Defender for IoT](../defender-for-iot/overview.md). +- **Security alerts** - When Defender for Cloud detects a threat in any area of your environment, it generates a security alert. These alerts describe details of the affected resources, suggested remediation steps, and in some cases an option to trigger a logic app in response. Whether an alert is generated by Defender for Cloud, or received by Defender for Cloud from an integrated security product, you can export it. To export your alerts to Microsoft Sentinel, any third-party SIEM, or any other external tool, follow the instructions in [Stream alerts to a SIEM, SOAR, or IT Service Management solution](export-to-siem.md). Defender for Cloud's threat protection includes fusion kill-chain analysis, which automatically correlates alerts in your environment based on cyber kill-chain analysis, to help you better understand the full story of an attack campaign, where it started and what kind of impact it had on your resources. [Defender for Cloud's supported kill chain intents are based on version 9 of the MITRE ATT&CK matrix](alerts-reference.md#intentions). + ## Learn More If you would like to learn more about Defender for Cloud from a cybersecurity expert, check out [Lessons Learned from the Field](episode-six.md). 
diff --git a/articles/defender-for-cloud/defender-for-containers-architecture.md b/articles/defender-for-cloud/defender-for-containers-architecture.md new file mode 100644 index 000000000000..3de83045adfa --- /dev/null +++ b/articles/defender-for-cloud/defender-for-containers-architecture.md @@ -0,0 +1,118 @@ +--- +title: Container security architecture in Microsoft Defender for Cloud +description: Learn about the architecture of Microsoft Defender for Containers for each container platform +author: bmansheim +ms.author: benmansheim +ms.topic: overview +ms.date: 05/31/2022 +--- +# Defender for Containers architecture + +Defender for Containers is designed differently for each container environment whether they're running in: + +- **Azure Kubernetes Service (AKS)** - Microsoft's managed service for developing, deploying, and managing containerized applications. + +- **Amazon Elastic Kubernetes Service (EKS) in a connected Amazon Web Services (AWS) account** - Amazon's managed service for running Kubernetes on AWS without needing to install, operate, and maintain your own Kubernetes control plane or nodes. + +- **Google Kubernetes Engine (GKE) in a connected Google Cloud Platform (GCP) project** - Google’s managed environment for deploying, managing, and scaling applications using GCP infrastructure. + +- **An unmanaged Kubernetes distribution** (using Azure Arc-enabled Kubernetes) - Cloud Native Computing Foundation (CNCF) certified Kubernetes clusters hosted on-premises or on IaaS. + +> [!NOTE] +> Defender for Containers support for Arc-enabled Kubernetes clusters (AWS EKS and GCP GKE) is a preview feature. 
+ +To protect your Kubernetes containers, Defender for Containers receives and analyzes: + +- Audit logs and security events from the API server +- Cluster configuration information from the control plane +- Workload configuration from Azure Policy +- Security signals and events from the node level + +## Architecture for each container environment + +## [**Azure (AKS)**](#tab/defender-for-container-arch-aks) + +### Architecture diagram of Defender for Cloud and AKS clusters + +When Defender for Cloud protects a cluster hosted in Azure Kubernetes Service, the collection of audit log data is agentless and frictionless. + +The **Defender profile (preview)** deployed to each node provides the runtime protections and collects signals from nodes using [eBPF technology](https://ebpf.io/). + +The **Azure Policy add-on for Kubernetes** collects cluster and workload configuration for admission control policies as explained in [Protect your Kubernetes workloads](kubernetes-workload-protections.md). + +> [!NOTE] +> Defender for Containers **Defender profile** is a preview feature. + +:::image type="content" source="./media/defender-for-containers/architecture-aks-cluster.png" alt-text="Diagram of high-level architecture of the interaction between Microsoft Defender for Containers, Azure Kubernetes Service, and Azure Policy." lightbox="./media/defender-for-containers/architecture-aks-cluster.png"::: + +### Defender profile component details + +| Pod Name | Namespace | Kind | Short Description | Capabilities | Resource limits | Egress Required | +|--|--|--|--|--|--|--| +| azuredefender-collector-ds-* | kube-system | [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) | A set of containers that focus on collecting inventory and security events from the Kubernetes environment. | SYS_ADMIN, 
    SYS_RESOURCE,
    SYS_PTRACE | memory: 64Mi

    cpu: 60m | No | +| azuredefender-collector-misc-* | kube-system | [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) | A set of containers that focus on collecting inventory and security events from the Kubernetes environment that aren't bounded to a specific node. | N/A | memory: 64Mi

    cpu: 60m | No | +| azuredefender-publisher-ds-* | kube-system | [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) | Publishes the collected data to Microsoft Defender for Containers backend service where the data will be processed and analyzed. | N/A | memory: 200Mi  

    cpu: 60m | Https 443

    Learn more about the [outbound access prerequisites](../aks/limit-egress-traffic.md#microsoft-defender-for-containers) | + +\* resource limits aren't configurable + +## [**On-premises / IaaS (Arc)**](#tab/defender-for-container-arch-arc) + +### Architecture diagram of Defender for Cloud and Arc-enabled Kubernetes clusters + +For all clusters hosted outside of Azure, [Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md) is required to connect the clusters to Azure and provide Azure services such as Defender for Containers. + +When a non-Azure container is connected to Azure with Arc, the [Arc extension](../azure-arc/kubernetes/extensions.md) collects Kubernetes audit logs data from all control plane nodes in the cluster. The extension sends the log data to the Microsoft Defender for Cloud backend in the cloud for further analysis. The extension is registered with a Log Analytics workspace used as a data pipeline, but the audit log data isn't stored in the Log Analytics workspace. + +Workload configuration information is collected by an Azure Policy add-on. As explained in [this Azure Policy for Kubernetes page](../governance/policy/concepts/policy-for-kubernetes.md), the add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). Kubernetes admission controllers are plugins that enforce how your clusters are used. The add-on registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements and safeguards on your clusters in a centralized, consistent manner. + +> [!NOTE] +> Defender for Containers support for Arc-enabled Kubernetes clusters is a preview feature. 
+ 
+:::image type="content" source="./media/defender-for-containers/architecture-arc-cluster.png" alt-text="Diagram of high-level architecture of the interaction between Microsoft Defender for Containers, Azure Kubernetes Service, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-arc-cluster.png"::: + +## [**AWS (EKS)**](#tab/defender-for-container-arch-eks) + +### Architecture diagram of Defender for Cloud and EKS clusters + +These components are required in order to receive the full protection offered by Microsoft Defender for Containers: + +- **[Kubernetes audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)** – [AWS account’s CloudWatch](https://aws.amazon.com/cloudwatch/) enables, and collects audit log data through an agentless collector, and sends the collected information to the Microsoft Defender for Cloud backend for further analysis. + +- **[Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md)** - An agent based solution that connects your EKS clusters to Azure. Azure then is capable of providing services such as Defender, and Policy as [Arc extensions](../azure-arc/kubernetes/extensions.md). + +- **The Defender extension** – The [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that collects signals from hosts using [eBPF technology](https://ebpf.io/), and provides runtime protection. The extension is registered with a Log Analytics workspace, and used as a data pipeline. However, the audit log data isn't stored in the Log Analytics workspace. + +- **The Azure Policy extension** - The workload's configuration information is collected by the Azure Policy add-on. The Azure Policy add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). 
The extension registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements, and safeguards on your clusters in a centralized, consistent manner. For more information, see [Understand Azure Policy for Kubernetes clusters](../governance/policy/concepts/policy-for-kubernetes.md). + +> [!NOTE] +> Defender for Containers support for AWS EKS clusters is a preview feature. + +:::image type="content" source="./media/defender-for-containers/architecture-eks-cluster.png" alt-text="Diagram of high-level architecture of the interaction between Microsoft Defender for Containers, Amazon Web Services' EKS clusters, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-eks-cluster.png"::: + +## [**GCP (GKE)**](#tab/defender-for-container-gke) + +### Architecture diagram of Defender for Cloud and GKE clusters + +These components are required in order to receive the full protection offered by Microsoft Defender for Containers: + +- **[Kubernetes audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)** – [GCP Cloud Logging](https://cloud.google.com/logging/) enables, and collects audit log data through an agentless collector, and sends the collected information to the Microsoft Defender for Cloud backend for further analysis. + +- **[Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md)** - An agent based solution that connects your GKE clusters to Azure. Azure then is capable of providing services such as Defender, and Policy as [Arc extensions](../azure-arc/kubernetes/extensions.md). + +- **The Defender extension** – The [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that collects signals from hosts using [eBPF technology](https://ebpf.io/), and provides runtime protection. The extension is registered with a Log Analytics workspace, and used as a data pipeline. 
However, the audit log data isn't stored in the Log Analytics workspace. + +- **The Azure Policy extension** - The workload's configuration information is collected by the Azure Policy add-on. The Azure Policy add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). The extension registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements, and safeguards on your clusters in a centralized, consistent manner. For more information, see [Understand Azure Policy for Kubernetes clusters](../governance/policy/concepts/policy-for-kubernetes.md). + +> [!NOTE] +> Defender for Containers support for GCP GKE clusters is a preview feature. + +:::image type="content" source="./media/defender-for-containers/architecture-gke.png" alt-text="Diagram of high-level architecture of the interaction between Microsoft Defender for Containers, Google GKE clusters, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-gke.png"::: + +--- + +## Next steps + +In this overview, you learned about the architecture of container security in Microsoft Defender for Cloud. 
To enable the plan, see: + +> [!div class="nextstepaction"] +> [Enable Defender for Containers](defender-for-containers-enable.md) diff --git a/articles/defender-for-cloud/defender-for-containers-introduction.md b/articles/defender-for-cloud/defender-for-containers-introduction.md index d36ae3a16e65..d179fad3f618 100644 --- a/articles/defender-for-cloud/defender-for-containers-introduction.md +++ b/articles/defender-for-cloud/defender-for-containers-introduction.md @@ -1,15 +1,17 @@ --- title: Container security with Microsoft Defender for Cloud description: Learn about Microsoft Defender for Containers +author: bmansheim +ms.author: benmansheim ms.topic: overview ms.date: 05/25/2022 --- # Overview of Microsoft Defender for Containers -Microsoft Defender for Containers is the cloud-native solution for securing your containers. +Microsoft Defender for Containers is the cloud-native solution for securing your containers so you can improve, monitor, and maintain the security of your clusters, containers, and their applications. -On this page, you'll learn how you can use Defender for Containers to improve, monitor, and maintain the security of your clusters, containers, and their applications. 
+[How does Defender for Containers work in each Kubernetes platform?](defender-for-containers-architecture.md) ## Microsoft Defender for Containers plan availability @@ -19,18 +21,17 @@ On this page, you'll learn how you can use Defender for Containers to improve, m | Feature availability | Refer to the [availability](supported-machines-endpoint-solutions-clouds-containers.md) section for additional information on feature release state and availability.| | Pricing: | **Microsoft Defender for Containers** is billed as shown on the [pricing page](https://azure.microsoft.com/pricing/details/defender-for-cloud/) | | Required roles and permissions: | • To auto provision the required components, see the [permissions for each of the components](enable-data-collection.md?tabs=autoprovision-containers)
    • **Security admin** can dismiss alerts
    • **Security reader** can view vulnerability assessment findings
    See also [Azure Container Registry roles and permissions](../container-registry/container-registry-roles.md) | -| Clouds: | **Azure**:
    :::image type="icon" source="./media/icons/yes-icon.png"::: Commercial clouds
    :::image type="icon" source="./media/icons/yes-icon.png"::: National clouds (Azure Government, Azure China 21Vianet) (Except for preview features))

    **Non Azure**:
    :::image type="icon" source="./media/icons/yes-icon.png"::: Connected AWS accounts (Preview)
    :::image type="icon" source="./media/icons/yes-icon.png"::: Connected GCP projects (Preview)
    :::image type="icon" source="./media/icons/yes-icon.png"::: On-prem/IaaS supported via Arc enabled Kubernetes (Preview).

    For more details, see the [availability section](supported-machines-endpoint-solutions-clouds-containers.md#defender-for-containers-feature-availability). | - +| Clouds: | **Azure**:
    :::image type="icon" source="./media/icons/yes-icon.png"::: Commercial clouds
    :::image type="icon" source="./media/icons/yes-icon.png"::: National clouds (Azure Government, Azure China 21Vianet) (Except for preview features)

    **Non-Azure**:
    :::image type="icon" source="./media/icons/yes-icon.png"::: Connected AWS accounts (Preview)
    :::image type="icon" source="./media/icons/yes-icon.png"::: Connected GCP projects (Preview)
    :::image type="icon" source="./media/icons/yes-icon.png"::: On-prem/IaaS supported via Arc enabled Kubernetes (Preview).

    For more information, see the [availability section](supported-machines-endpoint-solutions-clouds-containers.md#defender-for-containers-feature-availability). | ## What are the benefits of Microsoft Defender for Containers? Defender for Containers helps with the core aspects of container security: -- **Environment hardening** - Defender for Containers protects your Kubernetes clusters whether they're running on Azure Kubernetes Service, Kubernetes on-premises / IaaS, or Amazon EKS. By continuously assessing clusters, Defender for Containers provides visibility into misconfigurations and guidelines to help mitigate identified threats. Learn more in [Hardening](#hardening). +- [**Environment hardening**](#hardening) - Defender for Containers protects your Kubernetes clusters whether they're running on Azure Kubernetes Service, Kubernetes on-premises/IaaS, or Amazon EKS. By continuously assessing clusters, Defender for Containers provides visibility into misconfigurations and guidelines to help mitigate identified threats. -- **Vulnerability assessment** - Vulnerability assessment and management tools for images **stored** in ACR registries and **running** in Azure Kubernetes Service. Learn more in [Vulnerability assessment](#vulnerability-assessment). +- [**Vulnerability assessment**](#vulnerability-assessment) - Vulnerability assessment and management tools for images **stored** in ACR registries and **running** in Azure Kubernetes Service. -- **Run-time threat protection for nodes and clusters** - Threat protection for clusters and Linux nodes generates security alerts for suspicious activities. Learn more in [Run-time protection for Kubernetes nodes, clusters, and hosts](#run-time-protection-for-kubernetes-nodes-and-clusters). +- [**Run-time threat protection for nodes and clusters**](#run-time-protection-for-kubernetes-nodes-and-clusters) - Threat protection for clusters and Linux nodes generates security alerts for suspicious activities. 
## Hardening @@ -38,7 +39,7 @@ Defender for Containers helps with the core aspects of container security: Defender for Cloud continuously assesses the configurations of your clusters and compares them with the initiatives applied to your subscriptions. When it finds misconfigurations, Defender for Cloud generates security recommendations. Use Defender for Cloud's **recommendations page** to view recommendations and remediate issues. For details of the relevant Defender for Cloud recommendations that might appear for this feature, see the [compute section](recommendations-reference.md#recs-container) of the recommendations reference table. -For Kubernetes clusters on EKS, you'll need to connect your AWS account to Microsoft Defender for Cloud via the environment settings page as described in [Connect your AWS accounts to Microsoft Defender for Cloud](quickstart-onboard-aws.md). Then ensure you've enabled the CSPM plan. +For Kubernetes clusters on EKS, you'll need to [connect your AWS account to Microsoft Defender for Cloud](quickstart-onboard-aws.md). Then ensure you've enabled the CSPM plan. When reviewing the outstanding recommendations for your container-related resources, whether in asset inventory or the recommendations page, you can use the resource filter: @@ -46,7 +47,7 @@ When reviewing the outstanding recommendations for your container-related resour ### Kubernetes data plane hardening -For a bundle of recommendations to protect the workloads of your Kubernetes containers, install the **Azure Policy for Kubernetes**. You can also auto deploy this component as explained in [enable auto provisioning of agents and extensions](enable-data-collection.md#auto-provision-mma). +To protect the workloads of your Kubernetes containers with tailored recommendations, install the **Azure Policy for Kubernetes**. You can also auto deploy this component as explained in [enable auto provisioning of agents and extensions](enable-data-collection.md#auto-provision-mma). 
With the add-on on your AKS cluster, every request to the Kubernetes API server will be monitored against the predefined set of best practices before being persisted to the cluster. You can then configure to **enforce** the best practices and mandate them for future workloads. @@ -69,147 +70,46 @@ Learn more in [Vulnerability assessment](defender-for-containers-usage.md). :::image type="content" source="./media/defender-for-containers/recommendation-acr-images-with-vulnerabilities.png" alt-text="Sample Microsoft Defender for Cloud recommendation about vulnerabilities discovered in Azure Container Registry (ACR) hosted images." lightbox="./media/defender-for-containers/recommendation-acr-images-with-vulnerabilities.png"::: -### View vulnerabilities for running images +### View vulnerabilities for running images -The recommendation **Running container images should have vulnerability findings resolved** shows vulnerabilities for running images by using the scan results from ACR registries and information on running images from the Defender security profile/extension. Images that are deployed from a non ACR registry, will appear under the **Not applicable** tab. +The recommendation **Running container images should have vulnerability findings resolved** shows vulnerabilities for running images by using the scan results from ACR registries and information on running images from the Defender security profile/extension. Images that are deployed from a non-ACR registry, will appear under the **Not applicable** tab. -:::image type="content" source="media/defender-for-containers/running-image-vulnerabilities-recommendation.png" alt-text="Screenshot showing where the recommendation is viewable" lightbox="media/defender-for-containers/running-image-vulnerabilities-recommendation-expanded.png"::: +:::image type="content" source="media/defender-for-containers/running-image-vulnerabilities-recommendation.png" alt-text="Screenshot showing where the recommendation is viewable." 
lightbox="media/defender-for-containers/running-image-vulnerabilities-recommendation-expanded.png"::: ## Run-time protection for Kubernetes nodes and clusters -Defender for Cloud provides real-time threat protection for your containerized environments and generates alerts for suspicious activities. You can use this information to quickly remediate security issues and improve the security of your containers. +Defender for Containers provides real-time threat protection for your containerized environments and generates alerts for suspicious activities. You can use this information to quickly remediate security issues and improve the security of your containers. Threat protection at the cluster level is provided by the Defender profile and analysis of the Kubernetes audit logs. Examples of events at this level include exposed Kubernetes dashboards, creation of high-privileged roles, and the creation of sensitive mounts. -Threat protection at the cluster level is provided by the Defender profile and analysis of the Kubernetes audit logs. Examples of events at this level include exposed Kubernetes dashboards, creation of high-privileged roles, and the creation of sensitive mounts. +In addition, our threat detection goes beyond the Kubernetes management layer. Defender for Containers includes **host-level threat detection** with over 60 Kubernetes-aware analytics, AI, and anomaly detections based on your runtime workload. Our global team of security researchers constantly monitor the threat landscape. They add container-specific alerts and vulnerabilities as they're discovered. -In addition, our threat detection goes beyond the Kubernetes management layer. Defender for Containers includes **host-level threat detection** with over 60 Kubernetes-aware analytics, AI, and anomaly detections based on your runtime workload. Our global team of security researchers constantly monitor the threat landscape. 
They add container-specific alerts and vulnerabilities as they're discovered. Together, this solution monitors the growing attack surface of multicloud Kubernetes deployments and tracks the [MITRE ATT&CK® matrix for Containers](https://www.microsoft.com/security/blog/2021/04/29/center-for-threat-informed-defense-teams-up-with-microsoft-partners-to-build-the-attck-for-containers-matrix/), a framework that was developed by the [Center for Threat-Informed Defense](https://mitre-engenuity.org/ctid/) in close partnership with Microsoft and others. +This solution monitors the growing attack surface of multi-cloud Kubernetes deployments and tracks the [MITRE ATT&CK® matrix for Containers](https://www.microsoft.com/security/blog/2021/04/29/center-for-threat-informed-defense-teams-up-with-microsoft-partners-to-build-the-attck-for-containers-matrix/), a framework that was developed by the [Center for Threat-Informed Defense](https://mitre-engenuity.org/ctid/) in close partnership with Microsoft and others. The full list of available alerts can be found in the [Reference table of alerts](alerts-reference.md#alerts-k8scluster). :::image type="content" source="media/defender-for-containers/sample-containers-plan-alerts.png" alt-text="Screenshot of Defender for Cloud's alerts page showing alerts for multicloud Kubernetes resources." lightbox="./media/defender-for-containers/sample-containers-plan-alerts.png"::: -## Architecture overview - -The architecture of the various elements involved in the full range of protections provided by Defender for Containers varies depending on where your Kubernetes clusters are hosted. - -Defender for Containers protects your clusters whether they're running in: - -- **Azure Kubernetes Service (AKS) (Preview)** - Microsoft's managed service for developing, deploying, and managing containerized applications. 
- -- **Amazon Elastic Kubernetes Service (EKS) in a connected Amazon Web Services (AWS) account (Preview)** - Amazon's managed service for running Kubernetes on AWS without needing to install, operate, and maintain your own Kubernetes control plane or nodes. - -- **Google Kubernetes Engine (GKE) in a connected Google Cloud Platform (GCP) project (Preview)** - Google’s managed environment for deploying, managing, and scaling applications using GCP infrastructure. - -- **An unmanaged Kubernetes distribution** (using Azure Arc-enabled Kubernetes) - Cloud Native Computing Foundation (CNCF) certified Kubernetes clusters hosted on-premises or on IaaS. - -For high-level diagrams of each scenario, see the relevant tabs below. - -In the diagrams you'll see that the items received and analyzed by Defender for Cloud include: - -- Audit logs and security events from the API server -- Cluster configuration information from the control plane -- Workload configuration from Azure Policy -- Security signals and events from the node level - -### [**Azure (AKS)**](#tab/defender-for-container-arch-aks) - -### Architecture diagram of Defender for Cloud and AKS clusters - -When Defender for Cloud protects a cluster hosted in Azure Kubernetes Service, the collection of audit log data is agentless and frictionless. - -The **Defender profile (preview)** deployed to each node provides the runtime protections and collects signals from nodes using [eBPF technology](https://ebpf.io/). - -The **Azure Policy add-on for Kubernetes** collects cluster and workload configuration for admission control policies as explained in [Protect your Kubernetes workloads](kubernetes-workload-protections.md). - -> [!NOTE] -> Defender for Containers' **Defender profile** is a preview feature. 
- -:::image type="content" source="./media/defender-for-containers/architecture-aks-cluster.png" alt-text="High-level architecture of the interaction between Microsoft Defender for Containers, Azure Kubernetes Service, and Azure Policy." lightbox="./media/defender-for-containers/architecture-aks-cluster.png"::: - -#### Defender profile component details - -| Pod Name | Namespace | Kind | Short Description | Capabilities | Resource limits | Egress Required | -|--|--|--|--|--|--|--| -| azuredefender-collector-ds-* | kube-system | [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) | A set of containers that focus on collecting inventory and security events from the Kubernetes environment. | SYS_ADMIN, 
    SYS_RESOURCE,
    SYS_PTRACE | memory: 64Mi

    cpu: 60m | No | -| azuredefender-collector-misc-* | kube-system | [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) | A set of containers that focus on collecting inventory and security events from the Kubernetes environment that aren't bounded to a specific node. | N/A | memory: 64Mi

    cpu: 60m | No | -| azuredefender-publisher-ds-* | kube-system | [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) | Publish the collected data to Microsoft Defender for Containers' backend service where the data will be processed for and analyzed. | N/A | memory: 200Mi  

    cpu: 60m | Https 443

    Learn more about the [outbound access prerequisites](../aks/limit-egress-traffic.md#microsoft-defender-for-containers) | - -\* resource limits aren't configurable - -### [**On-premises / IaaS (Arc)**](#tab/defender-for-container-arch-arc) - -### Architecture diagram of Defender for Cloud and Arc-enabled Kubernetes clusters - -For all clusters hosted outside of Azure, [Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md) is required to connect the clusters to Azure and provide Azure services such as Defender for Containers. - -With the cluster connected to Azure, an [Arc extension](../azure-arc/kubernetes/extensions.md) collects Kubernetes audit logs data from all control plane nodes in the cluster and sends them to the Microsoft Defender for Cloud backend in the cloud for further analysis. The extension is registered with a Log Analytics workspace used as a data pipeline, but the audit log data isn't stored in the Log Analytics workspace. - -Workload configuration information is collected by an Azure Policy add-on. As explained in [this Azure Policy for Kubernetes page](../governance/policy/concepts/policy-for-kubernetes.md), the add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). Kubernetes admission controllers are plugins that enforce how your clusters are used. The add-on registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements and safeguards on your clusters in a centralized, consistent manner. - -> [!NOTE] -> Defender for Containers' support for Arc-enabled Kubernetes clusters is a preview feature. - -:::image type="content" source="./media/defender-for-containers/architecture-arc-cluster.png" alt-text="High-level architecture of the interaction between Microsoft Defender for Containers, Azure Kubernetes Service, Azure Arc-enabled Kubernetes, and Azure Policy." 
lightbox="./media/defender-for-containers/architecture-arc-cluster.png"::: - - - -### [**AWS (EKS)**](#tab/defender-for-container-arch-eks) - -### Architecture diagram of Defender for Cloud and EKS clusters - -The following describes the components necessary in order to receive the full protection offered by Microsoft Defender for Cloud for Containers. - -- **[Kubernetes audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)** – [AWS account’s CloudWatch](https://aws.amazon.com/cloudwatch/) enables, and collects audit log data through an agentless collector, and sends the collected information to the Microsoft Defender for Cloud backend for further analysis. - -- **[Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md)** - An agent based solution that connects your EKS clusters to Azure. Azure then is capable of providing services such as Defender, and Policy as [Arc extensions](../azure-arc/kubernetes/extensions.md). - -- **The Defender extension** – The [DeamonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that collects signals from hosts using [eBPF technology](https://ebpf.io/), and provides runtime protection. The extension is registered with a Log Analytics workspace, and used as a data pipeline. However, the audit log data isn't stored in the Log Analytics workspace. - -- **The Azure Policy extension** - The workload's configuration information is collected by the Azure Policy add-on. The Azure Policy add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). The extension registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements, and safeguards on your clusters in a centralized, consistent manner. 
For more information, see [Understand Azure Policy for Kubernetes clusters](../governance/policy/concepts/policy-for-kubernetes.md). - -> [!NOTE] -> Defender for Containers' support for AWS EKS clusters is a preview feature. - -:::image type="content" source="./media/defender-for-containers/architecture-eks-cluster.png" alt-text="High-level architecture of the interaction between Microsoft Defender for Containers, Amazon Web Services' EKS clusters, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-eks-cluster.png"::: - -### [**GCP (GKE)**](#tab/defender-for-container-gke) - -### Architecture diagram of Defender for Cloud and GKE clusters - -The following describes the components necessary in order to receive the full protection offered by Microsoft Defender for Cloud for Containers. - -- **[Kubernetes audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)** – [GCP Cloud Logging](https://cloud.google.com/logging/) enables, and collects audit log data through an agentless collector, and sends the collected information to the Microsoft Defender for Cloud backend for further analysis. - -- **[Azure Arc-enabled Kubernetes](../azure-arc/kubernetes/overview.md)** - An agent based solution that connects your EKS clusters to Azure. Azure then is capable of providing services such as Defender, and Policy as [Arc extensions](../azure-arc/kubernetes/extensions.md). - -- **The Defender extension** – The [DeamonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that collects signals from hosts using [eBPF technology](https://ebpf.io/), and provides runtime protection. The extension is registered with a Log Analytics workspace, and used as a data pipeline. However, the audit log data isn't stored in the Log Analytics workspace. - -- **The Azure Policy extension** - The workload's configuration information is collected by the Azure Policy add-on. 
The Azure Policy add-on extends the open-source [Gatekeeper v3](https://github.com/open-policy-agent/gatekeeper) admission controller webhook for [Open Policy Agent](https://www.openpolicyagent.org/). The extension registers as a web hook to Kubernetes admission control and makes it possible to apply at-scale enforcements, and safeguards on your clusters in a centralized, consistent manner. For more information, see [Understand Azure Policy for Kubernetes clusters](../governance/policy/concepts/policy-for-kubernetes.md). - -> [!NOTE] -> Defender for Containers' support for GCP GKE clusters is a preview feature. - -:::image type="content" source="./media/defender-for-containers/architecture-gke.png" alt-text="High-level architecture of the interaction between Microsoft Defender for Containers, Google GKE clusters, Azure Arc-enabled Kubernetes, and Azure Policy." lightbox="./media/defender-for-containers/architecture-gke.png"::: - ---- - ## FAQ - Defender for Containers - [What are the options to enable the new plan at scale?](#what-are-the-options-to-enable-the-new-plan-at-scale) -- [Does Microsoft Defender for Containers support AKS clusters with virtual machines scale set?](#does-microsoft-defender-for-containers-support-aks-clusters-with-virtual-machines-scale-set) +- [Does Microsoft Defender for Containers support AKS clusters with virtual machines scale sets?](#does-microsoft-defender-for-containers-support-aks-clusters-with-virtual-machines-scale-sets) - [Does Microsoft Defender for Containers support AKS without scale set (default)?](#does-microsoft-defender-for-containers-support-aks-without-scale-set-default) - [Do I need to install the Log Analytics VM extension on my AKS nodes for security protection?](#do-i-need-to-install-the-log-analytics-vm-extension-on-my-aks-nodes-for-security-protection) -### What are the options to enable the new plan at scale? 
-We’ve rolled out a new policy in Azure Policy, **Configure Microsoft Defender for Containers to be enabled**, to make it easier to enable the new plan at scale. +### What are the options to enable the new plan at scale? + +We’ve rolled out a new policy in Azure Policy, **Configure Microsoft Defender for Containers to be enabled**, to make it easier to enable the new plan at scale. -### Does Microsoft Defender for Containers support AKS clusters with virtual machines scale set? -Yes +### Does Microsoft Defender for Containers support AKS clusters with virtual machines scale sets? + +Yes. ### Does Microsoft Defender for Containers support AKS without scale set (default)? -No. Only Azure Kubernetes Service (AKS) clusters that use virtual machine scale sets for the nodes is supported. + +No. Only Azure Kubernetes Service (AKS) clusters that use virtual machine scale sets for the nodes is supported. ### Do I need to install the Log Analytics VM extension on my AKS nodes for security protection? -No, AKS is a managed service, and manipulation of the IaaS resources isn't supported. The Log Analytics VM extension is not needed and may result in additional charges. + +No, AKS is a managed service, and manipulation of the IaaS resources isn't supported. The Log Analytics VM extension isn't needed and may result in additional charges. 
## Learn More diff --git a/articles/defender-for-cloud/enable-enhanced-security.md b/articles/defender-for-cloud/enable-enhanced-security.md index 24f300973ab7..5b8715550e75 100644 --- a/articles/defender-for-cloud/enable-enhanced-security.md +++ b/articles/defender-for-cloud/enable-enhanced-security.md @@ -2,9 +2,7 @@ title: Enable Microsoft Defender for Cloud's integrated workload protections description: Learn how to enable enhanced security features to extend the protections of Microsoft Defender for Cloud to your hybrid and multicloud resources ms.topic: quickstart -ms.author: benmansheim -author: bmansheim -ms.date: 11/09/2021 +ms.date: 05/31/2022 ms.custom: mode-other --- @@ -28,35 +26,35 @@ To enable all Defender for Cloud features including threat protection capabiliti - You can enable **Microsoft Defender for SQL** at either the subscription level or resource level - You can enable **Microsoft Defender for open-source relational databases** at the resource level only -### To enable enhanced security features on your subscriptions and workspaces: +### Enable enhanced security features on your subscriptions and workspaces: - To enable enhanced security features on one subscription: 1. From Defender for Cloud's main menu, select **Environment settings**. + 1. Select the subscription or workspace that you want to protect. - 1. Select **Enable all Microsoft Defender plans** to upgrade. + + 1. Select **Enable all** to upgrade. + 1. Select **Save**. - > [!TIP] - > You'll notice that each Microsoft Defender plan is priced separately and can be individually set to on or off. For example, you might want to turn off Defender for App Service on subscriptions that don't have an associated Azure App Service plan. 
- - :::image type="content" source="./media/enhanced-security-features-overview/pricing-tier-page.png" alt-text="Defender for Cloud's pricing page in the portal"::: - + :::image type="content" source="./media/enhanced-security-features-overview/pricing-tier-page.png" alt-text="Defender for Cloud's pricing page in the portal" lightbox="media/enhanced-security-features-overview/pricing-tier-page.png"::: + - To enable enhanced security on multiple subscriptions or workspaces: 1. From Defender for Cloud's menu, select **Getting started**. The **Upgrade** tab lists subscriptions and workspaces eligible for onboarding. - :::image type="content" source="./media/enable-enhanced-security/get-started-upgrade-tab.png" alt-text="Upgrade tab of the getting started page."::: + :::image type="content" source="./media/enable-enhanced-security/get-started-upgrade-tab.png" alt-text="Upgrade tab of the getting started page." lightbox="media/enable-enhanced-security/get-started-upgrade-tab.png"::: 1. From the **Select subscriptions and workspaces to protect with Microsoft Defender for Cloud** list, select the subscriptions and workspaces to upgrade and select **Upgrade** to enable all Microsoft Defender for Cloud security features. - If you select subscriptions and workspaces that aren't eligible for trial, the next step will upgrade them and charges will begin. + - If you select a workspace that's eligible for a free trial, the next step will begin a trial. - :::image type="content" source="./media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png" alt-text="Upgrade all selected workspaces and subscriptions from the getting started page."::: - + :::image type="content" source="./media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png" alt-text="Upgrade all selected workspaces and subscriptions from the getting started page." 
lightbox="media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png"::: ## Disable enhanced security features @@ -64,14 +62,12 @@ If you need to disable enhanced security features for a subscription, the proced 1. From Defender for Cloud's menu, open **Environment settings**. 1. Select the relevant subscription. -1. Select **Defender plans** and select **Enhanced security off**. - - :::image type="content" source="./media/enable-enhanced-security/disable-plans.png" alt-text="Enable or disable Defender for Cloud's enhanced security features."::: +1. Find the plan you wish to turn off and select **off**. -1. Select **Save**. + :::image type="content" source="./media/enable-enhanced-security/disable-plans.png" alt-text="Enable or disable Defender for Cloud's enhanced security features." lightbox="media/enable-enhanced-security/disable-plans.png"::: -> [!NOTE] -> After you disable enhanced security features - whether you disable a single plan or all plans at once - data collection may continue for a short period of time. + > [!NOTE] + > After you disable enhanced security features - whether you disable a single plan or all plans at once - data collection may continue for a short period of time. 
## Next steps diff --git a/articles/defender-for-cloud/enhanced-security-features-overview.md b/articles/defender-for-cloud/enhanced-security-features-overview.md index 4bf4d42c1510..447fa181d096 100644 --- a/articles/defender-for-cloud/enhanced-security-features-overview.md +++ b/articles/defender-for-cloud/enhanced-security-features-overview.md @@ -2,10 +2,8 @@ title: Understand the enhanced security features of Microsoft Defender for Cloud description: Learn about the benefits of enabling enhanced security in Microsoft Defender for Cloud ms.topic: overview -ms.date: 04/11/2022 -ms.author: benmansheim +ms.date: 05/31/2022 ms.custom: references_regions -author: bmansheim --- # Microsoft Defender for Cloud's enhanced security features @@ -73,18 +71,21 @@ You can use any of the following ways to enable enhanced security for your subsc ### Can I enable Microsoft Defender for Servers on a subset of servers in my subscription? + No. When you enable [Microsoft Defender for Servers](defender-for-servers-introduction.md) on a subscription, all the machines in the subscription will be protected by Defender for Servers. An alternative is to enable Microsoft Defender for Servers at the Log Analytics workspace level. If you do this, only servers reporting to that workspace will be protected and billed. However, several capabilities will be unavailable. These include Microsoft Defender for Endpoint, VA solution (TVM/Qualys), just-in-time VM access, and more. ### If I already have a license for Microsoft Defender for Endpoint can I get a discount for Defender for Servers? + If you've already got a license for **Microsoft Defender for Endpoint for Servers Plan 2**, you won't have to pay for that part of your Microsoft Defender for Servers license. Learn more about [this license](/microsoft-365/security/defender-endpoint/minimum-requirements#licensing-requirements). 
To request your discount, [contact Defender for Cloud's support team](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/overview). You'll need to provide the relevant workspace ID, region, and number of Microsoft Defender for Endpoint for servers licenses applied for machines in the given workspace. The discount will be effective starting from the approval date, and won't take place retroactively. -### My subscription has Microsoft Defender for Servers enabled, do I pay for not-running servers? +### My subscription has Microsoft Defender for Servers enabled, do I pay for not-running servers? + No. When you enable [Microsoft Defender for Servers](defender-for-servers-introduction.md) on a subscription, you won't be charged for any machines that are in the deallocated power state while they're in that state. Machines are billed according to their power state as shown in the following table: | State | Description | Instance usage billed | @@ -98,35 +99,101 @@ No. When you enable [Microsoft Defender for Servers](defender-for-servers-introd :::image type="content" source="media/enhanced-security-features-overview/deallocated-virtual-machines.png" alt-text="Azure Virtual Machines showing a deallocated machine."::: +### If I enable Defender for Clouds Servers plan on the subscription level, do I need to enable it on the workspace level? + +When you enable the Servers plan on the subscription level, Defender for Cloud will enable the Servers plan on your default workspace(s) automatically when auto-provisioning is enabled. This can be accomplished on the Auto provisioning page by selecting **Connect Azure VMs to the default workspace(s) created by Defender for Cloud** option and selecting **Apply**. 
+ +:::image type="content" source="media/enhanced-security-features-overview/connect-workspace.png" alt-text="Screenshot showing how to auto provision defender for cloud to manage your workspaces."::: + +However, if you're using a custom workspace in place of the default workspace, you'll need to enable the Servers plan on all of your custom workspaces that do not have it enabled. + +If you're using a custom workspace and enable the plan on the subscription level only, the `Microsoft Defender for servers should be enabled on workspaces` recommendation will appear on the Recommendations page. This recommendation will give you the option to enable the servers plan on the workspace level with the Fix button. Until the workspace has the Servers plan enabled, any connected VM will not benefit from the full security coverage (Microsoft Defender for Endpoint, VA solution (TVM/Qualys), just-in-time VM access, and more) offered by the Defender for Cloud, but will still incur the cost. + +Enabling the Servers plan on both the subscription and its connected workspaces, will not incur a double charge. The system will identify each unique VM. + +If you enable the Servers plan on cross-subscription workspaces, all connected VMs, even those from subscriptions that it was not enabled on, will be billed. + ### Will I be charged for machines without the Log Analytics agent installed? + Yes. When you enable [Microsoft Defender for Servers](defender-for-servers-introduction.md) on a subscription, the machines in that subscription get a range of protections even if you haven't installed the Log Analytics agent. This is applicable for Azure virtual machines, Azure virtual machine scale sets instances, and Azure Arc-enabled servers. -### If a Log Analytics agent reports to multiple workspaces, will I be charged twice? -Yes. 
If you've configured your Log Analytics agent to send data to two or more different Log Analytics workspaces (multi-homing), you'll be charged for every workspace that has a 'Security' or 'AntiMalware' solution installed. +### If a Log Analytics agent reports to multiple workspaces, will I be charged twice? + +No you will not be charged twice. ### If a Log Analytics agent reports to multiple workspaces, is the 500 MB free data ingestion available on all of them? + Yes. If you've configured your Log Analytics agent to send data to two or more different Log Analytics workspaces (multi-homing), you'll get 500 MB free data ingestion. It's calculated per node, per reported workspace, per day, and available for every workspace that has a 'Security' or 'AntiMalware' solution installed. You'll be charged for any data ingested over the 500 MB limit. ### Is the 500 MB free data ingestion calculated for an entire workspace or strictly per machine? -You'll get 500 MB free data ingestion per day, for every machine connected to the workspace. Specifically for security data types directly collected by Defender for Cloud. -This data is a daily rate averaged across all nodes. So even if some machines send 100-MB and others send 800-MB, if the total doesn't exceed the **[number of machines] x 500 MB** free limit, you won't be charged extra. +You'll get 500 MB free data ingestion per day, for every VM connected to the workspace. Specifically for the [security data types](#what-data-types-are-included-in-the-500-mb-data-daily-allowance) that are directly collected by Defender for Cloud. + +This data is a daily rate averaged across all nodes. Your total daily free limit is equal to **[number of machines] x 500 MB**. So even if some machines send 100-MB and others send 800-MB, if the total doesn't exceed your total daily free limit, you won't be charged extra. ### What data types are included in the 500 MB data daily allowance? 
Defender for Cloud's billing is closely tied to the billing for Log Analytics. [Microsoft Defender for Servers](defender-for-servers-introduction.md) provides a 500 MB/node/day allocation for machines against the following subset of [security data types](/azure/azure-monitor/reference/tables/tables-category#security): -- SecurityAlert -- SecurityBaseline -- SecurityBaselineSummary -- SecurityDetection -- SecurityEvent -- WindowsFirewall -- MaliciousIPCommunication -- SysmonEvent -- ProtectionStatus -- Update and UpdateSummary data types when the Update Management solution is not running on the workspace or solution targeting is enabled + +- [SecurityAlert](/azure/azure-monitor/reference/tables/securityalert) +- [SecurityBaseline](/azure/azure-monitor/reference/tables/securitybaseline) +- [SecurityBaselineSummary](/azure/azure-monitor/reference/tables/securitybaselinesummary) +- [SecurityDetection](/azure/azure-monitor/reference/tables/securitydetection) +- [SecurityEvent](/azure/azure-monitor/reference/tables/securityevent) +- [WindowsFirewall](/azure/azure-monitor/reference/tables/windowsfirewall) +- [SysmonEvent](/azure/azure-monitor/reference/tables/sysmonevent) +- [ProtectionStatus](/azure/azure-monitor/reference/tables/protectionstatus) +- [Update](/azure/azure-monitor/reference/tables/update) and [UpdateSummary](/azure/azure-monitor/reference/tables/updatesummary) when the Update Management solution isn't running in the workspace or solution targeting is enabled. If the workspace is in the legacy Per Node pricing tier, the Defender for Cloud and Log Analytics allocations are combined and applied jointly to all billable ingested data. +## How can I monitor my daily usage + +You can view your data usage in two different ways, the Azure portal, or by running a script. + +**To view your usage in the Azure portal**: + +1. Sign in to the [Azure portal](https://portal.azure.com). + +1. Navigate to **Log Analytics workspaces**. + +1. Select your workspace. + +1. 
Select **Usage and estimated costs**. + + :::image type="content" source="media/enhanced-security-features-overview/data-usage.png" alt-text="Screenshot of your data usage of your log analytics workspace. " lightbox="media/enhanced-security-features-overview/data-usage.png"::: + +You can also view estimated costs under different pricing tiers by selecting :::image type="icon" source="media/enhanced-security-features-overview/drop-down-icon.png" border="false"::: for each pricing tier. + +:::image type="content" source="media/enhanced-security-features-overview/estimated-costs.png" alt-text="Screenshot showing how to view estimated costs under additional pricing tiers." lightbox="media/enhanced-security-features-overview/estimated-costs.png"::: + +**To view your usage by using a script**: + +1. Sign in to the [Azure portal](https://portal.azure.com). + +1. Navigate to **Log Analytics workspaces** > **Logs**. + +1. Select your time range. Learn about [time ranges](../azure-monitor/logs/log-analytics-tutorial.md). + +1. Copy and paste the following query into the **Type your query here** section. + + ```kusto + let Unit= 'GB'; + Usage + | where IsBillable == 'TRUE' + | where DataType in ('SecurityAlert', 'SecurityBaseline', 'SecurityBaselineSummary', 'SecurityDetection', 'SecurityEvent', 'WindowsFirewall', 'MaliciousIPCommunication', 'SysmonEvent', 'ProtectionStatus', 'Update', 'UpdateSummary') + | project TimeGenerated, DataType, Solution, Quantity, QuantityUnit + | summarize DataConsumedPerDataType = sum(Quantity)/1024 by DataType, DataUnit = Unit + | sort by DataConsumedPerDataType desc + ``` + +1. Select **Run**. + + :::image type="content" source="media/enhanced-security-features-overview/select-run.png" alt-text="Screenshot showing where to enter your query and where the select run button is located." 
lightbox="media/enhanced-security-features-overview/select-run.png"::: + +You can learn how to [Analyze usage in Log Analytics workspace](../azure-monitor/logs/analyze-usage.md). + +Based on your usage, you won't be billed until you've used your daily allowance. If you're receiving a bill, it's only for the data used after the 500 MB has been consumed, or for other services that don't fall under the coverage of Defender for Cloud. + ## Next steps This article explained Defender for Cloud's pricing options. For related material, see: diff --git a/articles/defender-for-cloud/media/enable-enhanced-security/disable-plans.png b/articles/defender-for-cloud/media/enable-enhanced-security/disable-plans.png index 5e76736123b0..d7d56758f87c 100644 Binary files a/articles/defender-for-cloud/media/enable-enhanced-security/disable-plans.png and b/articles/defender-for-cloud/media/enable-enhanced-security/disable-plans.png differ diff --git a/articles/defender-for-cloud/media/enable-enhanced-security/get-started-upgrade-tab.png b/articles/defender-for-cloud/media/enable-enhanced-security/get-started-upgrade-tab.png index 5d61eac8c8a9..1308b68a22d9 100644 Binary files a/articles/defender-for-cloud/media/enable-enhanced-security/get-started-upgrade-tab.png and b/articles/defender-for-cloud/media/enable-enhanced-security/get-started-upgrade-tab.png differ diff --git a/articles/defender-for-cloud/media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png b/articles/defender-for-cloud/media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png index b0d7ace23706..43f6f32a20d1 100644 Binary files a/articles/defender-for-cloud/media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png and b/articles/defender-for-cloud/media/enable-enhanced-security/upgrade-selected-workspaces-and-subscriptions.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/connect-workspace.png 
b/articles/defender-for-cloud/media/enhanced-security-features-overview/connect-workspace.png new file mode 100644 index 000000000000..4ae440b12394 Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/connect-workspace.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/data-usage.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/data-usage.png new file mode 100644 index 000000000000..3ce94697d67e Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/data-usage.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/drop-down-icon.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/drop-down-icon.png new file mode 100644 index 000000000000..b3a68546f033 Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/drop-down-icon.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/estimated-costs.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/estimated-costs.png new file mode 100644 index 000000000000..adba6728a20d Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/estimated-costs.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/pricing-tier-page.png b/articles/defender-for-cloud/media/enhanced-security-features-overview/pricing-tier-page.png index bba3d7ed5f7d..1d3528189a96 100644 Binary files a/articles/defender-for-cloud/media/enhanced-security-features-overview/pricing-tier-page.png and b/articles/defender-for-cloud/media/enhanced-security-features-overview/pricing-tier-page.png differ diff --git a/articles/defender-for-cloud/media/enhanced-security-features-overview/select-run.png 
b/articles/defender-for-cloud/media/enhanced-security-features-overview/select-run.png new file mode 100644 index 000000000000..2eb1060a4471 Binary files /dev/null and b/articles/defender-for-cloud/media/enhanced-security-features-overview/select-run.png differ diff --git a/articles/defender-for-cloud/release-notes.md b/articles/defender-for-cloud/release-notes.md index 72d77e98c403..1bdf20480799 100644 --- a/articles/defender-for-cloud/release-notes.md +++ b/articles/defender-for-cloud/release-notes.md @@ -2,7 +2,7 @@ title: Release notes for Microsoft Defender for Cloud description: A description of what's new and changed in Microsoft Defender for Cloud ms.topic: reference -ms.date: 05/18/2022 +ms.date: 05/30/2022 --- # What's new in Microsoft Defender for Cloud? @@ -22,7 +22,7 @@ Updates in May include: - [Multicloud settings of Servers plan are now available in connector level](#multicloud-settings-of-servers-plan-are-now-available-in-connector-level) - [JIT (Just-in-time) access for VMs is now available for AWS EC2 instances (Preview)](#jit-just-in-time-access-for-vms-is-now-available-for-aws-ec2-instances-preview) -- [Add and remove the Defender profile for AKS clusters from the CLI](#add-and-remove-the-defender-profile-for-aks-clusters-from-the-cli) +- [Add and remove the Defender profile for AKS clusters using the CLI](#add-and-remove-the-defender-profile-for-aks-clusters-using-the-cli) ### Multicloud settings of Servers plan are now available in connector level @@ -50,16 +50,16 @@ Learn more about [vulnerability management](deploy-vulnerability-assessment-tvm. ### JIT (Just-in-time) access for VMs is now available for AWS EC2 instances (Preview) -When you [connect AWS accounts](quickstart-onboard-aws.md), JIT will automatically evaluate the network configuration of your instances, security groups and recommend which instances need protection for their exposed management ports. This is similar to how JIT works with Azure. 
When you onboard unprotected EC2 instances, JIT will block public access to the management ports and only open them with authorized requests for a limited time frame. +When you [connect AWS accounts](quickstart-onboard-aws.md), JIT will automatically evaluate the network configuration of your instance's security groups and recommend which instances need protection for their exposed management ports. This is similar to how JIT works with Azure. When you onboard unprotected EC2 instances, JIT will block public access to the management ports and only open them with authorized requests for a limited time frame. Learn how [JIT protects your AWS EC2 instances](just-in-time-access-overview.md#how-jit-operates-with-network-resources-in-azure-and-aws) -### Add and remove the Defender profile for AKS clusters from the CLI +### Add and remove the Defender profile for AKS clusters using the CLI The Defender profile (preview) is required for Defender for Containers to provide the runtime protections and collects signals from nodes. You can now use the Azure CLI to [add and remove the Defender profile](defender-for-containers-enable.md?tabs=k8s-deploy-cli%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Ck8s-remove-cli&pivots=defender-for-container-aks#use-azure-cli-to-deploy-the-defender-extension) for an AKS cluster. > [!NOTE] -> This option is included in [Azure CLI 3.7 and above](/cli/azure/update-azure-cli.md). +> This option is included in [Azure CLI 3.7 and above](https://docs.microsoft.com/cli/azure/update-azure-cli). 
## April 2022 diff --git a/articles/defender-for-cloud/upcoming-changes.md b/articles/defender-for-cloud/upcoming-changes.md index 192ef8cb2983..68dc02b2a1dc 100644 --- a/articles/defender-for-cloud/upcoming-changes.md +++ b/articles/defender-for-cloud/upcoming-changes.md @@ -2,7 +2,7 @@ title: Important changes coming to Microsoft Defender for Cloud description: Upcoming changes to Microsoft Defender for Cloud that you might need to be aware of and for which you might need to plan ms.topic: overview -ms.date: 05/10/2022 +ms.date: 05/31/2022 --- # Important upcoming changes to Microsoft Defender for Cloud @@ -19,15 +19,15 @@ If you're looking for the latest release notes, you'll find them in the [What's | Planned change | Estimated date for change | |--|--| -| [Changes to recommendations for managing endpoint protection solutions](#changes-to-recommendations-for-managing-endpoint-protection-solutions) | May 2022 | -| [Key Vault recommendations changed to "audit"](#key-vault-recommendations-changed-to-audit) | May 2022 | +| [Changes to recommendations for managing endpoint protection solutions](#changes-to-recommendations-for-managing-endpoint-protection-solutions) | June 2022 | +| [Key Vault recommendations changed to "audit"](#key-vault-recommendations-changed-to-audit) | June 2022 | | [Multiple changes to identity recommendations](#multiple-changes-to-identity-recommendations) | June 2022 | | [Deprecating three VM alerts](#deprecating-three-vm-alerts) | June 2022| | [Deprecating the "API App should only be accessible over HTTPS" policy](#deprecating-the-api-app-should-only-be-accessible-over-https-policy)|June 2022| ### Changes to recommendations for managing endpoint protection solutions -**Estimated date for change:** May 2022 +**Estimated date for change:** June 2022 In August 2021, we added two new **preview** recommendations to deploy and maintain the endpoint protection solutions on your machines. 
For full details, [see the release note](release-notes-archive.md#two-new-recommendations-for-managing-endpoint-protection-solutions-in-preview). @@ -47,6 +47,8 @@ Learn more: ### Key Vault recommendations changed to "audit" +**Estimated date for change:** June 2022 + The Key Vault recommendations listed here are currently disabled so that they don't impact your secure score. We will change their effect to "audit". | Recommendation name | Recommendation ID | @@ -86,23 +88,23 @@ The new release will bring the following capabilities: |External accounts with owner permissions should be removed from your subscription|c3b6ae71-f1f0-31b4-e6c1-d5951285d03d| |External accounts with read permissions should be removed from your subscription|a8c6a4ad-d51e-88fe-2979-d3ee3c864f8b| |External accounts with write permissions should be removed from your subscription|04e7147b-0deb-9796-2e5c-0336343ceb3d| + #### Recommendations rename This update, will rename two recommendations, and revise their descriptions. The assessment keys will remain unchanged. -| Property | Current value | New update's change | -|--|--|--| -|**First recommendation**| - | - | -|Assessment key | e52064aa-6853-e252-a11e-dffc675689c2 | No change | -| Name | [Deprecated accounts with owner permissions should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/e52064aa-6853-e252-a11e-dffc675689c2) | Subscriptions should be purged of accounts that are blocked in Active Directory and have owner permissions. | -| Description | User accounts that have been blocked from signing in, should be removed from your subscriptions. -These accounts can be targets for attackers looking to find ways to access your data without being noticed. | User accounts that have been blocked from signing into Active Directory, should be removed from your subscriptions. 
These accounts can be targets for attackers looking to find ways to access your data without being noticed.
    Learn more about securing the identity perimeter in [Azure Identity Management and access control security best practices](../security/fundamentals/identity-management-best-practices.md). | -| Related policy | [Deprecated accounts with owner permissions should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2febb62a0c-3560-49e1-89ed-27e074e9f8ad) | Subscriptions should be purged of accounts that are blocked in Active Directory and have owner permissions. | -|**Second recommendation**| - | - | -| Assessment key | 00c6d40b-e990-6acf-d4f3-471e747a27c4 | No change | -| Name | [Deprecated accounts should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/00c6d40b-e990-6acf-d4f3-471e747a27c4) | Subscriptions should be purged of accounts that are blocked in Active Directory and have read and write permissions. | -| Description | User accounts that have been blocked from signing in, should be removed from your subscriptions.
    These accounts can be targets for attackers looking to find ways to access your data without being noticed. | User accounts that have been blocked from signing into Active Directory, should be removed from your subscriptions. These accounts can be targets for attackers looking to find ways to access your data without being noticed.
    Learn more about securing the identity perimeter in [Azure Identity Management and access control security best practices](../security/fundamentals/identity-management-best-practices.md). | -| Related policy | [Deprecated accounts should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f6b1cbf55-e8b6-442f-ba4c-7246b6381474) | Subscriptions should be purged of accounts that are blocked in Active Directory and have read and write permissions. | + | Property | Current value | New update's change | + |----|----|----| + |**First recommendation**| - | - | + |Assessment key | e52064aa-6853-e252-a11e-dffc675689c2 | No change| + | Name | [Deprecated accounts with owner permissions should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/e52064aa-6853-e252-a11e-dffc675689c2) |Subscriptions should be purged of accounts that are blocked in Active Directory and have owner permissions.| + |Description| User accounts that have been blocked from signing in, should be removed from your subscriptions.|These accounts can be targets for attackers looking to find ways to access your data without being noticed.
    Learn more about securing the identity perimeter in [Azure Identity Management and access control security best practices](../security/fundamentals/identity-management-best-practices.md).| + |Related policy|[Deprecated accounts with owner permissions should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2febb62a0c-3560-49e1-89ed-27e074e9f8ad) | Subscriptions should be purged of accounts that are blocked in Active Directory and have owner permissions.| + |**Second recommendation**| - | - | + | Assessment key | 00c6d40b-e990-6acf-d4f3-471e747a27c4 | No change | + | Name | [Deprecated accounts should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/00c6d40b-e990-6acf-d4f3-471e747a27c4)|Subscriptions should be purged of accounts that are blocked in Active Directory and have read and write permissions.| +|Description|User accounts that have been blocked from signing in, should be removed from your subscriptions.
    These accounts can be targets for attackers looking to find ways to access your data without being noticed.|User accounts that have been blocked from signing into Active Directory, should be removed from your subscriptions.
    Learn more about securing the identity perimeter in [Azure Identity Management and access control security best practices](../security/fundamentals/identity-management-best-practices.md).| + | Related policy | [Deprecated accounts should be removed from your subscription](https://ms.portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2fproviders%2fMicrosoft.Authorization%2fpolicyDefinitions%2f6b1cbf55-e8b6-442f-ba4c-7246b6381474) | Subscriptions should be purged of accounts that are blocked in Active Directory and have read and write permissions. | ### Deprecating three VM alerts diff --git a/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl360.md b/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl360.md index c8bd5f2b1ac9..c8f5365a56da 100644 --- a/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl360.md +++ b/articles/defender-for-iot/organizations/appliance-catalog/hpe-proliant-dl360.md @@ -12,7 +12,7 @@ This article describes the **HPE ProLiant DL360** appliance for OT sensors. | Appliance characteristic |Details | |---------|---------| |**Hardware profile** | Corporate | -|**Performance** | Max bandwidth: 3Gbp/s
    Max devices: 12,000 | +|**Performance** | Max bandwidth: 3Gbp/s
    Max devices: 12,000 | |**Physical specifications** | Mounting: 1U
    Ports: 15x RJ45 or 8x SFP (OPT)| |**Status** | Supported, Available preconfigured| diff --git a/articles/defender-for-iot/organizations/appliance-catalog/virtual-management-vmware.md b/articles/defender-for-iot/organizations/appliance-catalog/virtual-management-vmware.md index 2681fd3c401b..e669fcfdf40c 100644 --- a/articles/defender-for-iot/organizations/appliance-catalog/virtual-management-vmware.md +++ b/articles/defender-for-iot/organizations/appliance-catalog/virtual-management-vmware.md @@ -12,7 +12,7 @@ This article describes an on-premises management console deployment on a virtual | Appliance characteristic |Details | |---------|---------| |**Hardware profile** | As required for your organization. For more information, see [Which appliances do I need?](../ot-appliance-sizing.md) | -|**Performance** | As required for your organization. For more information, see [Which appliances do I need?](../ot-appliance-sizing.md) | +|**Performance** | As required for your organization. For more information, see [Which appliances do I need?](../ot-appliance-sizing.md) | |**Physical specifications** | Virtual Machine | |**Status** | Supported | diff --git a/articles/defender-for-iot/organizations/appliance-catalog/virtual-sensor-hyper-v.md b/articles/defender-for-iot/organizations/appliance-catalog/virtual-sensor-hyper-v.md index c7d92127af70..94db90d9045b 100644 --- a/articles/defender-for-iot/organizations/appliance-catalog/virtual-sensor-hyper-v.md +++ b/articles/defender-for-iot/organizations/appliance-catalog/virtual-sensor-hyper-v.md @@ -146,7 +146,7 @@ You are able to attach a SPAN Virtual Interface to the Virtual Switch through Wi 1. Select **OK**. -These commands set the name of the newly added adapter hardware to be `Monitor`. If you are using Hyper-V Manager, the name of the newly added adapter hardware is set to `Network Adapter`. +These commands set the name of the newly added adapter hardware to be `Monitor`. 
If you're using Hyper-V Manager, the name of the newly added adapter hardware is set to `Network Adapter`. **To attach a SPAN Virtual Interface to the virtual switch with Hyper-V Manager**: diff --git a/articles/defender-for-iot/organizations/how-to-accelerate-alert-incident-response.md b/articles/defender-for-iot/organizations/how-to-accelerate-alert-incident-response.md index 50ae2ac1fcec..a191060e849b 100644 --- a/articles/defender-for-iot/organizations/how-to-accelerate-alert-incident-response.md +++ b/articles/defender-for-iot/organizations/how-to-accelerate-alert-incident-response.md @@ -60,7 +60,7 @@ The alert group will appear in supported partner solutions with the following pr - **alert_group** for Syslog objects -These fields should be configured in the partner solution to display the alert group name. If there is no alert associated with an alert group, the field in the partner solution will display **NA**. +These fields should be configured in the partner solution to display the alert group name. If there's no alert associated with an alert group, the field in the partner solution will display **NA**. ### Default alert groups @@ -97,7 +97,7 @@ Add custom alert rule to pinpoint specific activity as needed for your organizat For example, you might want to define an alert for an environment running MODBUS to detect any write commands to a memory register, on a specific IP address and ethernet destination. Another example would be an alert for any access to a specific IP address. -Use custom alert rule actions to for IT to take specific action when the alert is triggered, such as allowing users to access PCAP files from the alert, assigning alert severity, or generating an event that shows in the event timeline. Alert messages indicate that the alert was generated from a custom alert rule. 
+Use custom alert rule actions to instruct Defender for IoT to take specific action when the alert is triggered, such as allowing users to access PCAP files from the alert, assigning alert severity, or generating an event that shows in the event timeline. Alert messages indicate that the alert was generated from a custom alert rule. **To create a custom alert rule**: diff --git a/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-sensor.md b/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-sensor.md index 0bb74fb7101f..7778ecfe214d 100644 --- a/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-sensor.md +++ b/articles/defender-for-iot/organizations/how-to-activate-and-set-up-your-sensor.md @@ -265,7 +265,7 @@ System messages provide general information about your sensor that may require y For more information, see: -- [Threat intelligence research and packages ](how-to-work-with-threat-intelligence-packages.md) +- [Threat intelligence research and packages](how-to-work-with-threat-intelligence-packages.md) - [Onboard a sensor](tutorial-onboarding.md#onboard-and-activate-the-virtual-sensor) diff --git a/articles/defender-for-iot/organizations/how-to-analyze-programming-details-changes.md b/articles/defender-for-iot/organizations/how-to-analyze-programming-details-changes.md index bc9858d2b156..bbdd7b0f505f 100644 --- a/articles/defender-for-iot/organizations/how-to-analyze-programming-details-changes.md +++ b/articles/defender-for-iot/organizations/how-to-analyze-programming-details-changes.md @@ -28,7 +28,7 @@ You may need to review programming activity: - After a planned update to controllers - - When a process or machine is not working correctly (to see who carried out the last update and when) + - When a process or machine isn't working correctly (to see who carried out the last update and when) :::image type="content" source="media/how-to-work-with-maps/differences.png" alt-text="Screenshot of a 
Programming Change Log"::: @@ -40,7 +40,7 @@ Other options let you: ## About authorized versus unauthorized programming events -Unauthorized programming events are carried out by devices that have not been learned or manually defined as programming devices. Authorized programming events are carried out by devices that were resolved or manually defined as programming devices. +Unauthorized programming events are carried out by devices that haven't been learned or manually defined as programming devices. Authorized programming events are carried out by devices that were resolved or manually defined as programming devices. The Programming Analysis window displays both authorized and unauthorized programming events. @@ -80,9 +80,9 @@ This section describes how to view programming files and compare versions. Searc |Programming timeline type | Description | |--|--| | Programmed Device | Provides details about the device that was programmed, including the hostname and file. | -| Recent Events | Displays the 50 most recent events detected by the sensor.
    To highlight an event, hover over it and click the star. :::image type="icon" source="media/how-to-work-with-maps/star.png" border="false":::
    The last 50 events can be viewed. | +| Recent Events | Displays the 50 most recent events detected by the sensor.
    To highlight an event, hover over it and select the star. :::image type="icon" source="media/how-to-work-with-maps/star.png" border="false":::
    The last 50 events can be viewed. | | Files | Displays the files detected for the chosen date and the file size on the programmed device.
    By default, the maximum number of files available for display per device is 300.
    By default, the maximum file size for each file is 15 MB. | -| File status :::image type="icon" source="media/how-to-work-with-maps/status-v2.png" border="false"::: | File labels indicate the status of the file on the device, including:
    **Added**: the file was added to the endpoint on the date or time selected.
    **Updated**: The file was updated on the date or time selected.
    **Deleted**: This file was removed.
    **No label**: The file was not changed. | +| File status :::image type="icon" source="media/how-to-work-with-maps/status-v2.png" border="false"::: | File labels indicate the status of the file on the device, including:
    **Added**: the file was added to the endpoint on the date or time selected.
    **Updated**: The file was updated on the date or time selected.
    **Deleted**: This file was removed.
    **No label**: The file wasn't changed. | | Programming Device | The device that made the programming change. Multiple devices may have carried out programming changes on one programmed device. The hostname, date, or time of change and logged in user are displayed. | | :::image type="icon" source="media/how-to-work-with-maps/current.png" border="false"::: | Displays the current file installed on the programmed device. | | :::image type="icon" source="media/how-to-work-with-maps/download-text.png" border="false"::: | Download a text file of the code displayed. | diff --git a/articles/defender-for-iot/organizations/how-to-control-what-traffic-is-monitored.md b/articles/defender-for-iot/organizations/how-to-control-what-traffic-is-monitored.md index 71edbad4e6cc..03d208023a42 100644 --- a/articles/defender-for-iot/organizations/how-to-control-what-traffic-is-monitored.md +++ b/articles/defender-for-iot/organizations/how-to-control-what-traffic-is-monitored.md @@ -156,7 +156,7 @@ If you're working with dynamic networks, you handle IP address changes that occu Changes might happen, for example, when a DHCP server assigns IP addresses. -Defining dynamic IP addresses on each sensor enables comprehensive, transparent support in instances of IP address changes. This ensures comprehensive reporting for each unique device. +Defining dynamic IP addresses on each sensor enables comprehensive, transparent support in instances of IP address changes. This activity ensures comprehensive reporting for each unique device. The sensor console presents the most current IP address associated with the device and indicates which devices are dynamic. 
For example: diff --git a/articles/defender-for-iot/organizations/how-to-create-data-mining-queries.md b/articles/defender-for-iot/organizations/how-to-create-data-mining-queries.md index f7c79acc710f..42b4824356f5 100644 --- a/articles/defender-for-iot/organizations/how-to-create-data-mining-queries.md +++ b/articles/defender-for-iot/organizations/how-to-create-data-mining-queries.md @@ -7,13 +7,13 @@ ms.topic: how-to # Run data mining queries -Using data mining queries to get dynamic, granular information about your network devices, including for specific time periods, internet connectivity, ports and protocols, firmware vrsions, programming commands, and device state. You can use data mining queries for: +Using data mining queries to get dynamic, granular information about your network devices, including for specific time periods, internet connectivity, ports and protocols, firmware versions, programming commands, and device state. You can use data mining queries for: - **SOC incident response**: Generate a report in real time to help deal with immediate incident response. For example, Data Mining can generate a report for a list of devices that might require patching. - **Forensics**: Generate a report based on historical data for investigative reports. - **Network security**: Generate a report that helps improve overall network security. For example, generate a report can be generated that lists devices with weak authentication credentials. - **Visibility**: Generate a report that covers all query items to view all baseline parameters of your network. -- **PLC security** Improve security by detecting PLCs in unsecure states for example Program and Remote states. +- **PLC security** Improve security by detecting PLCs in unsecure states, for example, Program and Remote states. Data mining information is saved and stored continuously, except for when a device is deleted. Data mining results can be exported and stored externally to a secure server. 
In addition, the sensor performs automatic daily backups to ensure system continuity and preservation of data. @@ -26,7 +26,7 @@ The following predefined reports are available. These queries are generated in r - **Internet activity**: Devices that are connected to the internet. - **CVEs**: A list of devices detected with known vulnerabilities, along with CVSSv2 risk scores. - **Excluded CVEs**: A list of all the CVEs that were manually excluded. It is possible to customize the CVE list manually so that the VA reports and attack vectors more accurately reflect your network by excluding or including particular CVEs and updating the CVSSv2 score accordingly. -- **Nonactive devices**: Devices that have not communicated for the past seven days. +- **Nonactive devices**: Devices that haven't communicated for the past seven days. - **Active devices**: Active network devices within the last 24 hours. Find these reports in **Analyze** > **Data Mining**. Reports are available for users with Administrator and Security Analyst permissions. Read only users can't access these reports. diff --git a/articles/defender-for-iot/organizations/how-to-create-trends-and-statistics-reports.md b/articles/defender-for-iot/organizations/how-to-create-trends-and-statistics-reports.md index a192751c16f7..cc1ccd97e47b 100644 --- a/articles/defender-for-iot/organizations/how-to-create-trends-and-statistics-reports.md +++ b/articles/defender-for-iot/organizations/how-to-create-trends-and-statistics-reports.md @@ -50,7 +50,7 @@ Protocol dissection | Displays a pie chart that provides you with a look at the Active TCP connections | Displays a chart that shows the number of active TCP connections in the system. Incident by type | Displays a pie chart that shows the number of incidents by type. This is the number of alerts generated by each engine over a predefined time period. Devices by vendor | Displays a pie chart that shows the number of devices by vendor. 
The number of devices for a specific vendor is proportional to the size of that device’s vendor part of the disk relative to other device vendors. -Number of devices per VLAN | Displays a pie chart that shows the number of discovered devices per VLAN. The size of each slice of the pie is proportional to the number of discovered devices relative to the other slices. Each VLAN appears with the VLAN tag assigned by the sensor or name that you have manually added. +Number of devices per VLAN | Displays a pie chart that shows the number of discovered devices per VLAN. The size of each slice of the pie is proportional to the number of discovered devices relative to the other slices. Each VLAN appears with the VLAN tag assigned by the sensor or name that you've manually added. Top bandwidth by VLAN | Displays the bandwidth consumption by VLAN. By default, the widget shows five VLANs with the highest bandwidth usage. You can filter the data by the period presented in the widget. Select the down arrow to show more results. diff --git a/articles/defender-for-iot/organizations/how-to-gain-insight-into-global-regional-and-local-threats.md b/articles/defender-for-iot/organizations/how-to-gain-insight-into-global-regional-and-local-threats.md index f58eeadd8e70..e531e6451740 100644 --- a/articles/defender-for-iot/organizations/how-to-gain-insight-into-global-regional-and-local-threats.md +++ b/articles/defender-for-iot/organizations/how-to-gain-insight-into-global-regional-and-local-threats.md @@ -9,7 +9,7 @@ ms.topic: how-to The site map in the on-premises management console helps you achieve full security coverage by dividing your network into geographical and logical segments that reflect your business topology: -- **Geographical facility level**: A site reflects a number of devices grouped according to a geographical location presented on the map. By default, Microsoft Defender for IoT provides you with a world map. 
You update the map to reflect your organizational or business structure. For example, use a map that reflects sites across a specific country, city, or industrial campus. When the site color changes on the map, it provides the SOC team with an indication of critical system status in the facility. +- **Geographical facility level**: A site reflects many devices grouped according to a geographical location presented on the map. By default, Microsoft Defender for IoT provides you with a world map. You update the map to reflect your organizational or business structure. For example, use a map that reflects sites across a specific country, city, or industrial campus. When the site color changes on the map, it provides the SOC team with an indication of critical system status in the facility. The map is interactive and enables opening each site and delving into this site's information. diff --git a/articles/defender-for-iot/organizations/how-to-install-software.md b/articles/defender-for-iot/organizations/how-to-install-software.md index 6190da88824d..929314f272ab 100644 --- a/articles/defender-for-iot/organizations/how-to-install-software.md +++ b/articles/defender-for-iot/organizations/how-to-install-software.md @@ -185,7 +185,7 @@ This procedure describes how to add a secondary NIC if you've already installed ### Find your port -If you are having trouble locating the physical port on your device, you can use the following command to find your port: +If you're having trouble locating the physical port on your device, you can use the following command to find your port: ```bash sudo ethtool -p diff --git a/articles/defender-for-iot/organizations/how-to-investigate-all-enterprise-sensor-detections-in-a-device-inventory.md b/articles/defender-for-iot/organizations/how-to-investigate-all-enterprise-sensor-detections-in-a-device-inventory.md index ade9b88066cc..c726866b844b 100644 --- 
a/articles/defender-for-iot/organizations/how-to-investigate-all-enterprise-sensor-detections-in-a-device-inventory.md +++ b/articles/defender-for-iot/organizations/how-to-investigate-all-enterprise-sensor-detections-in-a-device-inventory.md @@ -31,13 +31,13 @@ The following table describes the table columns in the device inventory. | **MAC Address** | The MAC address of the device. | | **Protocols** | The protocols that the device uses. | | **Unacknowledged Alerts** | The number of unhandled alerts associated with this device. | -| **Is Authorized** | The authorization status of the device:
    - **True**: The device has been authorized.
    - **False**: The device has not been authorized. | +| **Is Authorized** | The authorization status of the device:
    - **True**: The device has been authorized.
    - **False**: The device hasn't been authorized. | | **Is Known as Scanner** | Whether this device performs scanning-like activities in the network. | -| **Is Programming Device** | Whether this is a programming device:
    - **True**: The device performs programming activities for PLCs, RTUs, and controllers, which are relevant to engineering stations.
    - **False**: The device is not a programming device. | +| **Is Programming Device** | Whether this is a programming device:
    - **True**: The device performs programming activities for PLCs, RTUs, and controllers, which are relevant to engineering stations.
    - **False**: The device isn't a programming device. | | **Groups** | Groups in which this device participates. | | **Last Activity** | The last activity that the device performed. | | **Discovered** | When this device was first seen in the network. | -| **PLC mode (preview)** | The PLC operating mode includes the Key state (physical) and run state (logical). Possible **Key** states include, Run, Program, Remote, Stop, Invalid, Programming Disabled.Possible Run. The possible **Run** states are Run, Program, Stop, Paused, Exception, Halted, Trapped, Idle, Offline. if both states are the same, only oe state is presented. | +| **PLC mode (preview)** | The PLC operating mode includes the Key state (physical) and run state (logical). Possible **Key** states include Run, Program, Remote, Stop, Invalid, and Programming Disabled. The possible **Run** states are Run, Program, Stop, Paused, Exception, Halted, Trapped, Idle, and Offline. If both states are the same, only one state is presented. | ## What is an Inventory device? diff --git a/articles/defender-for-iot/organizations/how-to-manage-individual-sensors.md b/articles/defender-for-iot/organizations/how-to-manage-individual-sensors.md index 3e6373adc524..e67922af3e24 100644 --- a/articles/defender-for-iot/organizations/how-to-manage-individual-sensors.md +++ b/articles/defender-for-iot/organizations/how-to-manage-individual-sensors.md @@ -27,7 +27,7 @@ You can continue to work with Defender for IoT features even if the activation f ### About activation files for cloud-connected sensors -Sensors that are cloud connected are not limited by time periods for their activation file. The activation file for cloud-connected sensors is used to ensure the connection to Defender for IoT. +Sensors that are cloud connected aren't limited by time periods for their activation file. The activation file for cloud-connected sensors is used to ensure the connection to Defender for IoT.
### Upload new activation files @@ -65,9 +65,9 @@ You might need to upload a new activation file for an onboarded sensor when: ### Troubleshoot activation file upload -You'll receive an error message if the activation file could not be uploaded. The following events might have occurred: +You'll receive an error message if the activation file couldn't be uploaded. The following events might have occurred: -- **For locally connected sensors**: The activation file is not valid. If the file is not valid, go to [Defender for IoT in the Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_IoT_Defender/IoTDefenderDashboard/Getting_Started). On the **Sensor Management** page, select the sensor with the invalid file, and download a new activation file. +- **For locally connected sensors**: The activation file isn't valid. If the file isn't valid, go to [Defender for IoT in the Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_IoT_Defender/IoTDefenderDashboard/Getting_Started). On the **Sensor Management** page, select the sensor with the invalid file, and download a new activation file. - **For cloud-connected sensors**: The sensor can't connect to the internet. Check the sensor's network configuration. If your sensor needs to connect through a web proxy to access the internet, verify that your proxy server is configured correctly on the **Sensor Network Configuration** screen. Verify that \*.azure-devices.net:443 is allowed in the firewall and/or proxy. If wildcards are not supported or you want more control, the FQDN for your specific endpoint (either a sensor, or for legacy connections, an IoT hub) should be opened in your firewall and/or proxy. For more information, see [Reference - IoT Hub endpoints](../../iot-hub/iot-hub-devguide-endpoints.md). 
diff --git a/articles/defender-for-iot/organizations/how-to-manage-subscriptions.md b/articles/defender-for-iot/organizations/how-to-manage-subscriptions.md index 1b861a6696b6..eb21f3d2a151 100644 --- a/articles/defender-for-iot/organizations/how-to-manage-subscriptions.md +++ b/articles/defender-for-iot/organizations/how-to-manage-subscriptions.md @@ -9,7 +9,7 @@ ms.topic: how-to Your Defender for IoT deployment is managed through your Microsoft Defender for IoT account subscriptions. You can onboard, edit, and offboard your subscriptions to Defender for IoT in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_IoT_Defender/IoTDefenderDashboard/Getting_Started). -For each subscription, you will be asked to define a number of *committed devices*. Committed devices are the approximate number of devices that will be monitored in your enterprise. +For each subscription, you'll be asked to define a number of *committed devices*. Committed devices are the approximate number of devices that will be monitored in your enterprise. > [!NOTE] > If you've come to this page because you are a [former CyberX customer](https://blogs.microsoft.com/blog/2020/06/22/microsoft-acquires-cyberx-to-accelerate-and-secure-customers-iot-deployments) and have questions about your account, reach out to your account manager for guidance. @@ -17,7 +17,7 @@ For each subscription, you will be asked to define a number of *committed device ## Subscription billing -You are billed based on the number of committed devices associated with each subscription. +You're billed based on the number of committed devices associated with each subscription. The billing cycle for Microsoft Defender for IoT follows a calendar month. Changes you make to committed devices during the month are implemented one hour after confirming your update, and are reflected in your monthly bill. Subscription *offboarding* also takes effect one hour after confirming the offboard. 
@@ -25,7 +25,7 @@ Your enterprise may have more than one paying entity. If this is the case you ca Before you subscribe, you should have a sense of how many devices you would like your subscriptions to cover. -Users can also work with trial subscription, which supports monitoring a limited number of devices for 30 days. See [Microsoft Defender for IoT pricing](https://azure.microsoft.com/pricing/details/defender-for-cloud/#defenderforiot) information on committed device prices. +Users can also work with a trial subscription, which supports monitoring a limited number of devices for 30 days. See [Microsoft Defender for IoT pricing](https://azure.microsoft.com/pricing/details/iot-defender/) for information on committed device prices. ## Requirements @@ -76,11 +76,11 @@ This section describes how to onboard a subscription. 1. Select **Subscribe**. 1. Confirm your subscription. -1. If you have not done so already, onboard a sensor or Set up a sensor. +1. If you haven't done so already, onboard a sensor or set up a sensor. ## Update committed devices in a subscription -You may need to update your subscription with more committed devices, or more fewer committed devices. More devices may require monitoring if, for example, you are increasing existing site coverage, discovered more devices than expected or there are network changes such as adding switches. +You may need to update your subscription with more committed devices, or fewer committed devices. More devices may require monitoring if, for example, you are increasing existing site coverage, discovered more devices than expected, or there are network changes such as adding switches. **To update a subscription:** 1. Go to [Defender for IoT: Getting started](https://portal.azure.com/#blade/Microsoft_Azure_IoT_Defender/IoTDefenderDashboard/Getting_Started) in the Azure portal.
diff --git a/articles/defender-for-iot/organizations/how-to-manage-the-alert-event.md b/articles/defender-for-iot/organizations/how-to-manage-the-alert-event.md index 3c7babf15669..89ca75f8e6b2 100644 --- a/articles/defender-for-iot/organizations/how-to-manage-the-alert-event.md +++ b/articles/defender-for-iot/organizations/how-to-manage-the-alert-event.md @@ -209,8 +209,8 @@ Users working with alerts on the Defender for IoT portal on Azure should underst Parameter | Description |--|--| | **Alert Exclusion rules**| Alert *Exclusion rules* defined in the on-premises management console impact the alerts triggered by managed sensors. As a result, the alerts excluded by these rules also won't be displayed in the Alerts page on the portal. For more information, see [Create alert exclusion rules](how-to-work-with-alerts-on-premises-management-console.md#create-alert-exclusion-rules). -| **Managing alerts on your sensor** | If you change the status of an alert, or learn or mute an alert on a sensor, the changes are not updated in the Defender for IoT Alerts page on the portal. This means that this alert will stay open on the portal. However another alert won't be triggered from sensor for this activity. -| **Managing alerts in the portal Alerts page** | Changing the status of an alert on the Azure portal, Alerts page or changing the alert severity on the portal, does not impact the alert status or severity in on-premises sensors. +| **Managing alerts on your sensor** | If you change the status of an alert, or learn or mute an alert on a sensor, the changes are not updated in the Defender for IoT Alerts page on the portal. This means that this alert will stay open on the portal. However another alert won't be triggered from the sensor for this activity. 
+| **Managing alerts in the portal Alerts page** | Changing the status of an alert on the Azure portal, Alerts page or changing the alert severity on the portal, doesn't impact the alert status or severity in on-premises sensors. ## Next steps diff --git a/articles/defender-for-iot/organizations/how-to-set-up-high-availability.md b/articles/defender-for-iot/organizations/how-to-set-up-high-availability.md index 32d4ec251644..cfa42255560d 100644 --- a/articles/defender-for-iot/organizations/how-to-set-up-high-availability.md +++ b/articles/defender-for-iot/organizations/how-to-set-up-high-availability.md @@ -20,13 +20,13 @@ When a primary and secondary on-premises management console is paired: - The primary on-premises management console data is automatically backed up to the secondary on-premises management console every 10 minutes. The on-premises management console configurations and device data are backed up. PCAP files and logs are not included in the backup. You can back up and restore of PCAPs and logs manually. -- The primary setup at the management console is duplicated on the secondary; for example, system settings. If these settings are updated on the primary, they are also updated on the secondary. +- The primary setup at the management console is duplicated on the secondary; for example, system settings. If these settings are updated on the primary, they're also updated on the secondary. - Before the license of the secondary expires, you should define it as the primary in order to update the license. ## About failover and failback -If a sensor cannot connect to the primary on-premises management console, it automatically connects to the secondary. Your system will be supported by both the primary and secondary simultaneously, if less than half of the sensors are communicating with the secondary. The secondary takes over when more than half of the sensors are communicating with it. 
Fail over from the primary to the secondary takes approximately three minutes. When the failover occurs, the primary on-premises management console freezes. When this happens, you can sign in to the secondary using the same sign-in credentials. +If a sensor can't connect to the primary on-premises management console, it automatically connects to the secondary. Your system will be supported by both the primary and secondary simultaneously, if less than half of the sensors are communicating with the secondary. The secondary takes over when more than half of the sensors are communicating with it. Fail over from the primary to the secondary takes approximately three minutes. When the failover occurs, the primary on-premises management console freezes. When this happens, you can sign in to the secondary using the same sign-in credentials. During failover, sensors continue attempting to communicate with the primary appliance. When more than half the managed sensors succeed to communicate with the primary, the primary is restored. The following message appears at the secondary console when the primary is restored. @@ -48,7 +48,7 @@ The installation and configuration procedures are performed in four main stages: ## High availability requirements -Verify that you have met the following high availability requirements: +Verify that you've met the following high availability requirements: - Certificate requirements diff --git a/articles/defender-for-iot/organizations/how-to-set-up-snmp-mib-monitoring.md b/articles/defender-for-iot/organizations/how-to-set-up-snmp-mib-monitoring.md index 4e71bdd07254..a1f7919609aa 100644 --- a/articles/defender-for-iot/organizations/how-to-set-up-snmp-mib-monitoring.md +++ b/articles/defender-for-iot/organizations/how-to-set-up-snmp-mib-monitoring.md @@ -1,7 +1,7 @@ --- title: Set up SNMP MIB monitoring description: You can perform sensor health monitoring by using SNMP. 
The sensor responds to SNMP queries sent from an authorized monitoring server. -ms.date: 01/31/2022 +ms.date: 05/31/2022 ms.topic: how-to --- @@ -35,7 +35,7 @@ Note that: ## Prerequisites for AES and 3-DES Encryption Support for SNMP Version 3 - The network management station (NMS) must support Simple Network Management Protocol (SNMP) Version 3 to be able to use this feature. -- It is important to understand the SNMP architecture and the terminology of the architecture to understand the security model used and how the security model interacts with the other subsystems in the architecture. +- It's important to understand the SNMP architecture and the terminology of the architecture to understand the security model used and how the security model interacts with the other subsystems in the architecture. - Before you begin configuring SNMP monitoring, you need to open the port UDP 161 in the firewall. @@ -50,7 +50,7 @@ Note that: | Parameter | Description | |--|--| - | **Username** | The SNMP username can contain up to 32 characters and include any combination of alphanumeric characters (uppercase letters, lowercase letters, and numbers). Spaces are not allowed.

    The username for the SNMP v3 authentication must be configured on the system and on the SNMP server. | + | **Username** | The SNMP username can contain up to 32 characters and include any combination of alphanumeric characters (uppercase letters, lowercase letters, and numbers). Spaces aren't allowed.

    The username for the SNMP v3 authentication must be configured on the system and on the SNMP server. | | **Password** | Enter a case-sensitive authentication password. The authentication password can contain 8 to 12 characters and include any combination of alphanumeric characters (uppercase letters, lowercase letters, and numbers).

    The username for the SNMP v3 authentication must be configured on the system and on the SNMP server. | | **Auth Type** | Select MD5 or SHA-1. | | **Encryption** | Select DES (56 bit key size)[1](#1) or AES (AES 128 bits supported)[2](#2). | diff --git a/articles/defender-for-iot/organizations/how-to-view-information-per-zone.md b/articles/defender-for-iot/organizations/how-to-view-information-per-zone.md index 224d9b0fe4aa..d19952e1a7f7 100644 --- a/articles/defender-for-iot/organizations/how-to-view-information-per-zone.md +++ b/articles/defender-for-iot/organizations/how-to-view-information-per-zone.md @@ -51,7 +51,7 @@ The following tools are available for viewing devices and device information fro To view alerts associated with a specific zone: -- Select the alert icon form the **Zone** window. +- Select the alert icon from the **Zone** window. :::image type="content" source="media/how-to-work-with-asset-inventory-information/business-unit-view-v2.png" alt-text="The default Business Unit view with examples."::: @@ -77,7 +77,7 @@ The following additional zone information is available: - **Connectivity status**: If a sensor is disconnected, connect from the sensor. See [Connect sensors to the on-premises management console](how-to-activate-and-set-up-your-on-premises-management-console.md#connect-sensors-to-the-on-premises-management-console). -- **Update progress**: If the connected sensor is being upgraded, upgrade statuses will appear. During upgrade, the on-premises management console does not receive device information from the sensor. +- **Update progress**: If the connected sensor is being upgraded, upgrade statuses will appear. During upgrade, the on-premises management console doesn't receive device information from the sensor. 
## Next steps diff --git a/articles/defender-for-iot/organizations/how-to-work-with-threat-intelligence-packages.md b/articles/defender-for-iot/organizations/how-to-work-with-threat-intelligence-packages.md index 670aae45fee1..a28ebc7cfa4a 100644 --- a/articles/defender-for-iot/organizations/how-to-work-with-threat-intelligence-packages.md +++ b/articles/defender-for-iot/organizations/how-to-work-with-threat-intelligence-packages.md @@ -102,7 +102,7 @@ This option is available for both *cloud connected* and *locally managed* sensors. ## Review package update status on the sensor ## -The package update status and version information is displayed in the sensor **System Settings**, **Threat Intelligence** section. +The package update status and version information are displayed in the sensor **System Settings**, **Threat Intelligence** section. ## Review package information for cloud connected sensors ## diff --git a/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-apis.md b/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-apis.md index 52eeeb45aed9..8b0520f5e1a1 100644 --- a/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-apis.md +++ b/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-apis.md @@ -7,7 +7,7 @@ ms.topic: reference # Defender for IoT sensor and management console APIs -Defender for IoT APIs are governed by [Microsoft API License and Terms of use](/legal/microsoft-apis/terms-of-use). +Defender for IoT APIs are governed by [Microsoft API License and Terms of use](/legal/microsoft-apis/terms-of-use). Use an external REST API to access the data discovered by sensors and management consoles and perform actions with that data.
@@ -239,11 +239,11 @@ Message string with the operation status details: - **Failure – error**: User authentication failure -- **Failure – error**: User does not exist +- **Failure – error**: User doesn't exist - **Failure – error**: Password doesn't match security policy -- **Failure – error**: User does not have the permissions to change password +- **Failure – error**: User doesn't have the permissions to change password #### Response example @@ -2501,13 +2501,15 @@ Define conditions under which alerts won't be sent. For example, define and upda The APIs that you define here appear in the on-premises management console's Alert Exclusions window as a read-only exclusion rule. +This API is supported for maintenance purposes only and is not meant to be used instead of [alert exclusion rules](/azure/defender-for-iot/organizations/how-to-work-with-alerts-on-premises-management-console#create-alert-exclusion-rules). Use this API for one-time maintenance operations only. + #### Method - POST #### Query parameters - **ticketId**: Defines the maintenance ticket ID in the user's systems. -- **ttl**: Defines the TTL (time to live), which is the duration of the maintenance window in minutes. After the period of time that this parameter defines, the system automatically starts sending alerts. +- **ttl**: Required. Defines the TTL (time to live), which is the duration of the maintenance window in minutes. After the period of time that this parameter defines, the system automatically starts sending alerts. - **engines**: Defines from which security engine to suppress alerts during the maintenance process: @@ -2777,7 +2779,7 @@ The below API's can be used with the ServiceNow integration via the ServiceNow's - Type: JSON - Structure: - - “**u_id**” - the internal id of the device. + - “**u_id**” - the internal ID of the device. - “**u_vendor**” - the name of the vendor. - “**u_mac_address_objects**” - array of - “**u_mac_address**” - mac address of the device. 
@@ -2804,7 +2806,7 @@ The below API's can be used with the ServiceNow integration via the ServiceNow's - “**u_protocol**” - protocol the device uses. - “**u_purdue_layer**” - the purdue layer that was manually set by the user. - “**u_sensor_ids**” - array of - - “**u_sensor_id**” - the id of the sensor that saw the device. + - “**u_sensor_id**” - the ID of the sensor that saw the device. - “**u_device_urls**” - array of - “**u_device_url**” the URL to view the device in the sensor. - “**u_firmwares**” - array of @@ -2829,7 +2831,7 @@ The below API's can be used with the ServiceNow integration via the ServiceNow's - Type: JSON - Structure: - Array of - - “**u_id**” - the id of the deleted device. + - “**u_id**” - the ID of the deleted device. ### Sensors @@ -2843,7 +2845,7 @@ The below API's can be used with the ServiceNow integration via the ServiceNow's - Type: JSON - Structure: - Array of - - “**u_id**” - internal sensor id, to be used in the devices API. + - “**u_id**” - internal sensor ID, to be used in the devices API. - “**u_name**” - the name of the appliance. - “**u_connection_state**” - connectivity with the CM state. One of the following: - “**SYNCED**” - Connection is successful. diff --git a/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-cli-commands.md b/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-cli-commands.md index 53274e5a21f1..6e3f16de9144 100644 --- a/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-cli-commands.md +++ b/articles/defender-for-iot/organizations/references-work-with-defender-for-iot-cli-commands.md @@ -111,7 +111,7 @@ The following table describes the commands available to configure your network o ## Network capture filter configuration -The `network capture-filter` command allows administrators to eliminate network traffic that doesn't need to be analyzed. You can filter traffic by using an include list, or an exclude list. 
This command does not support the malware detection engine. +The `network capture-filter` command allows administrators to eliminate network traffic that doesn't need to be analyzed. You can filter traffic by using an include list, or an exclude list. This command doesn't support the malware detection engine. ```azurecli-interactive network capture-filter @@ -175,9 +175,9 @@ You're asked the following question: Your options are: `all`, `dissector`, `collector`, `statistics-collector`, `rpc-parser`, or `smb-parser`. -In most common use cases, we recommend that you select `all`. Selecting `all` does not include the malware detection engine, which is not supported by this command. +In most common use cases, we recommend that you select `all`. Selecting `all` doesn't include the malware detection engine, which isn't supported by this command. -### Custom base capture filter +### Custom base capture filter The base capture filter is the baseline for the components. For example, the filter determines which ports are available to the component. diff --git a/articles/defender-for-iot/organizations/resources-frequently-asked-questions.md b/articles/defender-for-iot/organizations/resources-frequently-asked-questions.md index 207d89619844..ba77ab1a0a9a 100644 --- a/articles/defender-for-iot/organizations/resources-frequently-asked-questions.md +++ b/articles/defender-for-iot/organizations/resources-frequently-asked-questions.md @@ -20,7 +20,7 @@ Microsoft Defender for IoT provides comprehensive protocol support. In addition - Secure proprietary information by developing on-site as an external plugin. - Localize text for alerts, events, and protocol parameters. -This unique solution for developing protocols as plugins, does not require dedicated developer teams or version releases in order to support a new protocol. Developers, partners, and customers can securely develop protocols and share insights and knowledge using Horizon. 
+This unique solution for developing protocols as plugins doesn't require dedicated developer teams or version releases in order to support a new protocol. Developers, partners, and customers can securely develop protocols and share insights and knowledge using Horizon. ## Do I have to purchase hardware appliances from Microsoft partners? Microsoft Defender for IoT sensor runs on specific hardware specs as described in the [Hardware Specifications Guide](./how-to-identify-required-appliances.md), customers can purchase certified hardware from Microsoft partners or use the supplied bill of materials (BOM) and purchase it on their own. @@ -28,7 +28,7 @@ Microsoft Defender for IoT sensor runs on specific hardware specs as described i Certified hardware has been tested in our labs for driver stability, packet drops and network sizing. -## Regulation does not allow us to connect our system to the Internet. Can we still utilize Defender for IoT? +## Regulation doesn't allow us to connect our system to the Internet. Can we still utilize Defender for IoT? Yes you can! The Microsoft Defender for IoT platform on-premises solution is deployed as a physical or virtual sensor appliance that passively ingests network traffic (via SPAN, RSPAN, or TAP) to analyze, discover, and continuously monitor IT, OT, and IoT networks. For larger enterprises, multiple sensors can aggregate their data to an on-premises management console. @@ -76,7 +76,7 @@ You can work with CLI [commands](references-work-with-defender-for-iot-cli-comma ## How do I check the sanity of my deployment -After installing the software for your sensor or on-premises management console, you will want to perform the [Post-installation validation](how-to-install-software.md#post-installation-validation). +After installing the software for your sensor or on-premises management console, you'll want to perform the [Post-installation validation](how-to-install-software.md#post-installation-validation). 
You can also use our [UI and CLI tools](how-to-troubleshoot-the-sensor-and-on-premises-management-console.md#check-system-health) to check system health and review your overall system statistics. diff --git a/articles/expressroute/expressroute-locations.md b/articles/expressroute/expressroute-locations.md index b49b0ab2309b..4d4dc0be816d 100644 --- a/articles/expressroute/expressroute-locations.md +++ b/articles/expressroute/expressroute-locations.md @@ -77,7 +77,7 @@ The following table shows locations by service provider. If you want to view ava | **[Deutsche Telekom AG](https://www.t-systems.com/de/en/cloud-and-infrastructure/manage-it-efficiently/managed-azure/cloudconnect-for-azure)** | Supported |Supported | Frankfurt2 | | **du datamena** |Supported |Supported | Dubai2 | | **[eir](https://www.eirevo.ie/cloud-services/cloud-connectivity)** |Supported |Supported | Dublin| -| **[Epsilon Global Communications](https://www.epsilontel.com/solutions/direct-cloud-connect)** |Supported |Supported | Singapore, Singapore2 | +| **[Epsilon Global Communications](https://epsilontel.com/solutions/cloud-connect/)** |Supported |Supported | Singapore, Singapore2 | | **[Equinix](https://www.equinix.com/partners/microsoft-azure/)** |Supported |Supported | Amsterdam, Amsterdam2, Atlanta, Berlin, Bogota, Canberra2, Chicago, Dallas, Dubai2, Dublin, Frankfurt, Frankfurt2, Geneva, Hong Kong SAR, London, London2, Los Angeles*, Los Angeles2, Melbourne, Miami, Milan, New York, Osaka, Paris, Quebec City, Rio de Janeiro, Sao Paulo, Seattle, Seoul, Silicon Valley, Singapore, Singapore2, Stockholm, Sydney, Tokyo, Toronto, Washington DC, Zurich

    **New ExpressRoute circuits are no longer supported with Equinix in Los Angeles. Please create new circuits in Los Angeles2.* | | **Etisalat UAE** |Supported |Supported | Dubai | | **[euNetworks](https://eunetworks.com/services/solutions/cloud-connect/microsoft-azure-expressroute/)** |Supported |Supported | Amsterdam, Amsterdam2, Dublin, Frankfurt, London | diff --git a/articles/governance/policy/samples/gov-dod-impact-level-4.md b/articles/governance/policy/samples/gov-dod-impact-level-4.md index 00d018237108..d44fcdd4de0b 100644 --- a/articles/governance/policy/samples/gov-dod-impact-level-4.md +++ b/articles/governance/policy/samples/gov-dod-impact-level-4.md @@ -243,7 +243,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. 
By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. 
Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. 
Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Azure Web Application Firewall should be enabled for Azure Front Door entry-points](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F055aa869-bc98-4af8-bafc-23f1ab6ffe2c) |Deploy Azure Web Application Firewall (WAF) in front of public facing web applications for additional inspection of incoming traffic. Web Application Firewall (WAF) provides centralized protection of your web applications from common exploits and vulnerabilities such as SQL injections, Cross-Site Scripting, local and remote file executions. You can also restrict access to your web applications by countries, IP address ranges, and other http(s) parameters via custom rules. |Audit, Deny, Disabled |[1.0.2](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Network/WAF_AFD_Enabled_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). 
|Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | @@ -291,7 +291,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). 
|Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). 
|Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Azure Web Application Firewall should be enabled for Azure Front Door entry-points](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F055aa869-bc98-4af8-bafc-23f1ab6ffe2c) |Deploy Azure Web Application Firewall (WAF) in front of public facing web applications for additional inspection of incoming traffic. Web Application Firewall (WAF) provides centralized protection of your web applications from common exploits and vulnerabilities such as SQL injections, Cross-Site Scripting, local and remote file executions. You can also restrict access to your web applications by countries, IP address ranges, and other http(s) parameters via custom rules. |Audit, Deny, Disabled |[1.0.2](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Network/WAF_AFD_Enabled_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | @@ -684,7 +684,7 @@ initiative definition. 
|[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). 
|Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). 
|Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | |[Cognitive Services accounts should restrict network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F037eea7a-bd0a-46c5-9a66-03aea78705d3) |Network access to Cognitive Services accounts should be restricted. Configure network rules so only applications from allowed networks can access the Cognitive Services account. To allow connections from specific internet or on-premises clients, access can be granted to traffic from specific Azure virtual networks or to public internet IP address ranges. |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_NetworkAcls_Audit.json) | @@ -768,7 +768,7 @@ initiative definition. 
|[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). 
|Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). 
|Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fcddd188c-4b82-4c48-a19d-ddf74ee66a01) |Azure Private Link lets you connect your virtual networks to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Cognitive Services, you'll reduce the potential for data leakage. Learn more about private links at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_EnablePrivateEndpoints_Audit.json) | |[Container registries should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fe8eef0a8-67cf-4eb4-9386-14b0e78733d4) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network.By mapping private endpoints to your container registries instead of the entire service, you'll also be protected against data leakage risks. Learn more at: [https://aka.ms/acr/private-link](../../../container-registry/container-registry-private-link.md). 
|Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Container%20Registry/ACR_PrivateEndpointEnabled_Audit.json) | @@ -806,7 +806,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). 
|Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). |AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). 
|Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fcddd188c-4b82-4c48-a19d-ddf74ee66a01) |Azure Private Link lets you connect your virtual networks to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Cognitive Services, you'll reduce the potential for data leakage. Learn more about private links at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_EnablePrivateEndpoints_Audit.json) | |[Container registries should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fe8eef0a8-67cf-4eb4-9386-14b0e78733d4) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network.By mapping private endpoints to your container registries instead of the entire service, you'll also be protected against data leakage risks. Learn more at: [https://aka.ms/acr/private-link](../../../container-registry/container-registry-private-link.md). 
|Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Container%20Registry/ACR_PrivateEndpointEnabled_Audit.json) | @@ -1125,4 +1125,4 @@ Additional articles about Azure Policy: - See the [initiative definition structure](../concepts/initiative-definition-structure.md). - Review other examples at [Azure Policy samples](./index.md). - Review [Understanding policy effects](../concepts/effects.md). -- Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). \ No newline at end of file +- Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). diff --git a/articles/governance/policy/samples/gov-dod-impact-level-5.md b/articles/governance/policy/samples/gov-dod-impact-level-5.md index 0e85189527d4..a18a3e649dfd 100644 --- a/articles/governance/policy/samples/gov-dod-impact-level-5.md +++ b/articles/governance/policy/samples/gov-dod-impact-level-5.md @@ -243,7 +243,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Azure Web Application Firewall should be enabled for Azure Front Door entry-points](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F055aa869-bc98-4af8-bafc-23f1ab6ffe2c) |Deploy Azure Web Application Firewall (WAF) in front of public facing web applications for additional inspection of incoming traffic. Web Application Firewall (WAF) provides centralized protection of your web applications from common exploits and vulnerabilities such as SQL injections, Cross-Site Scripting, local and remote file executions. You can also restrict access to your web applications by countries, IP address ranges, and other http(s) parameters via custom rules. 
|Audit, Deny, Disabled |[1.0.2](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Network/WAF_AFD_Enabled_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | @@ -291,7 +291,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Azure Web Application Firewall should be enabled for Azure Front Door entry-points](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F055aa869-bc98-4af8-bafc-23f1ab6ffe2c) |Deploy Azure Web Application Firewall (WAF) in front of public facing web applications for additional inspection of incoming traffic. Web Application Firewall (WAF) provides centralized protection of your web applications from common exploits and vulnerabilities such as SQL injections, Cross-Site Scripting, local and remote file executions. You can also restrict access to your web applications by countries, IP address ranges, and other http(s) parameters via custom rules. 
|Audit, Deny, Disabled |[1.0.2](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Network/WAF_AFD_Enabled_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | @@ -684,7 +684,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services accounts should disable public network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F0725b4dd-7e76-479c-a735-68e7ee23d5ca) |Disabling public network access improves security by ensuring that Cognitive Services account isn't exposed on the public internet. Creating private endpoints can limit exposure of Cognitive Services account. Learn more at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). 
|Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_DisablePublicNetworkAccess_Audit.json) | |[Cognitive Services accounts should restrict network access](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F037eea7a-bd0a-46c5-9a66-03aea78705d3) |Network access to Cognitive Services accounts should be restricted. Configure network rules so only applications from allowed networks can access the Cognitive Services account. To allow connections from specific internet or on-premises clients, access can be granted to traffic from specific Azure virtual networks or to public internet IP address ranges. |Audit, Deny, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_NetworkAcls_Audit.json) | @@ -768,7 +768,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fcddd188c-4b82-4c48-a19d-ddf74ee66a01) |Azure Private Link lets you connect your virtual networks to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Cognitive Services, you'll reduce the potential for data leakage. Learn more about private links at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). 
|Audit, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_EnablePrivateEndpoints_Audit.json) | |[Container registries should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fe8eef0a8-67cf-4eb4-9386-14b0e78733d4) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network.By mapping private endpoints to your container registries instead of the entire service, you'll also be protected against data leakage risks. Learn more at: [https://aka.ms/acr/private-link](../../../container-registry/container-registry-private-link.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Container%20Registry/ACR_PrivateEndpointEnabled_Audit.json) | @@ -806,7 +806,7 @@ initiative definition. |[Azure File Sync should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1d320205-c6a1-4ac6-873d-46224024e8e2) |Creating a private endpoint for the indicated Storage Sync Service resource allows you to address your Storage Sync Service resource from within the private IP address space of your organization's network, rather than through the internet-accessible public endpoint. Creating a private endpoint by itself does not disable the public endpoint. 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Azure%20Government/Storage/StorageSync_PrivateEndpoint_AuditIfNotExists.json) | |[Azure Machine Learning workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F40cec1dd-a100-4920-b15b-3024fe8901ab) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Machine Learning workspaces, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/machine-learning/how-to-configure-private-link](../../../machine-learning/how-to-configure-private-link.md). |Audit, Deny, Disabled |[1.1.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Machine%20Learning/Workspace_PrivateEndpoint_Audit.json) | |[Azure Service Bus namespaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F1c06e275-d63d-4540-b761-71f364c2111d) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Service Bus namespaces, data leakage risks are reduced. Learn more at: [https://docs.microsoft.com/azure/service-bus-messaging/private-link-service](../../../service-bus-messaging/private-link-service.md). 
|AuditIfNotExists, Disabled |[1.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Service%20Bus/ServiceBus_PrivateEndpoint_Audit.json) | -|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). |Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit.json) | +|[Azure SignalR Service should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F53503636-bcc9-4748-9663-5348217f160f) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to your Azure SignalR Service resource instead of the entire service, you'll reduce your data leakage risks. Learn more about private links at: [https://aka.ms/asrs/privatelink](../../../azure-signalr/howto-private-endpoints.md). 
|Audit, Deny, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/SignalR/SignalR_PrivateEndpointEnabled_Audit_v2.json) | |[Azure Synapse workspaces should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F72d11df1-dd8a-41f7-8925-b05b960ebafc) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Azure Synapse workspace, data leakage risks are reduced. Learn more about private links at: [https://docs.microsoft.com/azure/synapse-analytics/security/how-to-connect-to-workspace-with-private-links](../../../synapse-analytics/security/how-to-connect-to-workspace-with-private-links.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Synapse/SynapseWorkspaceUsePrivateLinks_Audit.json) | |[Cognitive Services should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fcddd188c-4b82-4c48-a19d-ddf74ee66a01) |Azure Private Link lets you connect your virtual networks to Azure services without a public IP address at the source or destination. The Private Link platform handles the connectivity between the consumer and services over the Azure backbone network. By mapping private endpoints to Cognitive Services, you'll reduce the potential for data leakage. Learn more about private links at: [https://go.microsoft.com/fwlink/?linkid=2129800](../../../private-link/index.yml). 
|Audit, Disabled |[2.0.0](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Cognitive%20Services/CognitiveServices_EnablePrivateEndpoints_Audit.json) | |[Container registries should use private link](https://portal.azure.us/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fe8eef0a8-67cf-4eb4-9386-14b0e78733d4) |Azure Private Link lets you connect your virtual network to Azure services without a public IP address at the source or destination. The private link platform handles the connectivity between the consumer and services over the Azure backbone network.By mapping private endpoints to your container registries instead of the entire service, you'll also be protected against data leakage risks. Learn more at: [https://aka.ms/acr/private-link](../../../container-registry/container-registry-private-link.md). |Audit, Disabled |[1.0.1](https://github.com/Azure/azure-policy/blob/master/built-in-policies/policyDefinitions/Container%20Registry/ACR_PrivateEndpointEnabled_Audit.json) | @@ -1125,4 +1125,4 @@ Additional articles about Azure Policy: - See the [initiative definition structure](../concepts/initiative-definition-structure.md). - Review other examples at [Azure Policy samples](./index.md). - Review [Understanding policy effects](../concepts/effects.md). -- Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). \ No newline at end of file +- Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). 
diff --git a/articles/guides/operations/azure-operations-guide.md b/articles/guides/operations/azure-operations-guide.md index 9605c3c974f8..ffe489b7128f 100644 --- a/articles/guides/operations/azure-operations-guide.md +++ b/articles/guides/operations/azure-operations-guide.md @@ -137,7 +137,7 @@ One of the benefits of using Azure is that you can deploy your applications into ### Azure portal -The Azure portal is a web-based application that can be used to create, manage, and remove Azure resources and services. The Azure portal is located at [portal.azure.com](https://portal.azure.com). It includes a customizable dashboard and tooling for managing Azure resources. It also provides billing and subscription information. For more information, see [Microsoft Azure portal overview](https://azure.microsoft.com/documentation/articles/azure-portal-overview/) and [Manage Azure resources through portal](../../azure-resource-manager/management/manage-resources-portal.md). +The Azure portal is a web-based application that can be used to create, manage, and remove Azure resources and services. The Azure portal is located at [portal.azure.com](https://portal.azure.com). It includes a customizable dashboard and tooling for managing Azure resources. It also provides billing and subscription information. For more information, see [Microsoft Azure portal overview](/azure/azure-portal/azure-portal-overview) and [Manage Azure resources through portal](../../azure-resource-manager/management/manage-resources-portal.md). ### Resources diff --git a/articles/hdinsight/cluster-management-best-practices.md b/articles/hdinsight/cluster-management-best-practices.md index 03932e422a88..66c6e8ae608b 100644 --- a/articles/hdinsight/cluster-management-best-practices.md +++ b/articles/hdinsight/cluster-management-best-practices.md @@ -4,7 +4,7 @@ description: Learn best practices for managing HDInsight clusters. 
ms.service: hdinsight ms.custom: hdinsightactive ms.topic: conceptual -ms.date: 04/11/2020 +ms.date: 05/30/2022 --- # HDInsight cluster management best practices diff --git a/articles/hdinsight/domain-joined/apache-domain-joined-create-configure-enterprise-security-cluster.md b/articles/hdinsight/domain-joined/apache-domain-joined-create-configure-enterprise-security-cluster.md index a162805d63d4..14169b5fdfcd 100644 --- a/articles/hdinsight/domain-joined/apache-domain-joined-create-configure-enterprise-security-cluster.md +++ b/articles/hdinsight/domain-joined/apache-domain-joined-create-configure-enterprise-security-cluster.md @@ -4,7 +4,7 @@ description: Learn how to create and configure Enterprise Security Package clust services: hdinsight ms.service: hdinsight ms.topic: how-to -ms.date: 12/10/2019 +ms.date: 05/31/2022 ms.custom: devx-track-azurepowershell --- diff --git a/articles/hdinsight/hadoop/apache-hadoop-connect-excel-power-query.md b/articles/hdinsight/hadoop/apache-hadoop-connect-excel-power-query.md index 6b98d2f49836..9fd09a3eccf2 100644 --- a/articles/hdinsight/hadoop/apache-hadoop-connect-excel-power-query.md +++ b/articles/hdinsight/hadoop/apache-hadoop-connect-excel-power-query.md @@ -4,7 +4,7 @@ description: Learn how to take advantage of business intelligence components and ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 12/17/2019 +ms.date: 05/30/2022 --- # Connect Excel to Apache Hadoop by using Power Query diff --git a/articles/hdinsight/hadoop/apache-hadoop-develop-deploy-java-mapreduce-linux.md b/articles/hdinsight/hadoop/apache-hadoop-develop-deploy-java-mapreduce-linux.md index 121f64bad271..1c7362bf4a3d 100644 --- a/articles/hdinsight/hadoop/apache-hadoop-develop-deploy-java-mapreduce-linux.md +++ b/articles/hdinsight/hadoop/apache-hadoop-develop-deploy-java-mapreduce-linux.md @@ -4,7 +4,7 @@ description: Learn how to use Apache Maven to create a Java-based MapReduce appl ms.service: hdinsight ms.topic: 
how-to ms.custom: hdinsightactive,hdiseo17may2017, devx-track-java -ms.date: 01/16/2020 +ms.date: 05/31/2022 --- # Develop Java MapReduce programs for Apache Hadoop on HDInsight @@ -279,4 +279,4 @@ In this document, you have learned how to develop a Java MapReduce job. See the * [Use Apache Hive with HDInsight](hdinsight-use-hive.md) * [Use MapReduce with HDInsight](hdinsight-use-mapreduce.md) -* [Java Developer Center](https://azure.microsoft.com/develop/java/) \ No newline at end of file +* [Java Developer Center](https://azure.microsoft.com/develop/java/) diff --git a/articles/hdinsight/hadoop/apache-hadoop-hive-pig-udf-dotnet-csharp.md b/articles/hdinsight/hadoop/apache-hadoop-hive-pig-udf-dotnet-csharp.md index 2e70ea719768..f2d8420219c3 100644 --- a/articles/hdinsight/hadoop/apache-hadoop-hive-pig-udf-dotnet-csharp.md +++ b/articles/hdinsight/hadoop/apache-hadoop-hive-pig-udf-dotnet-csharp.md @@ -4,7 +4,7 @@ description: Learn how to use C# user-defined functions (UDF) with Apache Hive a ms.service: hdinsight ms.topic: how-to ms.custom: "hdinsightactive, devx-track-csharp" -ms.date: 12/06/2019 +ms.date: 05/30/2022 --- # Use C# user-defined functions with Apache Hive and Apache Pig on Apache Hadoop in HDInsight diff --git a/articles/hdinsight/hadoop/connect-install-beeline.md b/articles/hdinsight/hadoop/connect-install-beeline.md index e7531d85d3c6..2bc41e036db2 100644 --- a/articles/hdinsight/hadoop/connect-install-beeline.md +++ b/articles/hdinsight/hadoop/connect-install-beeline.md @@ -4,7 +4,7 @@ description: Learn how to connect to the Apache Beeline client to run Hive queri ms.service: hdinsight ms.topic: how-to ms.custom: contperf-fy21q1 -ms.date: 04/07/2021 +ms.date: 05/30/2022 --- # Connect to HiveServer2 using Beeline or install Beeline locally to connect from your local diff --git a/articles/hdinsight/hadoop/hdinsight-troubleshoot-converting-service-principal-certificate.md 
b/articles/hdinsight/hadoop/hdinsight-troubleshoot-converting-service-principal-certificate.md index 442e89d66c26..3a7e0f30f01f 100644 --- a/articles/hdinsight/hadoop/hdinsight-troubleshoot-converting-service-principal-certificate.md +++ b/articles/hdinsight/hadoop/hdinsight-troubleshoot-converting-service-principal-certificate.md @@ -3,7 +3,7 @@ title: Converting certificate contents to base-64 - Azure HDInsight description: Converting service principal certificate contents to base-64 encoded string format in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 07/31/2019 +ms.date: 05/30/2022 ms.custom: devx-track-csharp --- @@ -47,4 +47,4 @@ namespace ConsoleApplication ## Next steps -[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] \ No newline at end of file +[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] diff --git a/articles/hdinsight/hadoop/hdinsight-troubleshoot-out-disk-space.md b/articles/hdinsight/hadoop/hdinsight-troubleshoot-out-disk-space.md index 69c8d242dda9..55ab151162d4 100644 --- a/articles/hdinsight/hadoop/hdinsight-troubleshoot-out-disk-space.md +++ b/articles/hdinsight/hadoop/hdinsight-troubleshoot-out-disk-space.md @@ -3,7 +3,7 @@ title: Cluster node runs out of disk space in Azure HDInsight description: Troubleshooting Apache Hadoop cluster node disk space issues in Azure HDInsight. ms.service: hdinsight ms.topic: troubleshooting -ms.date: 04/30/2020 +ms.date: 05/30/2022 --- # Scenario: Cluster node runs out of disk space in Azure HDInsight @@ -24,7 +24,7 @@ Apache Yarn application cache may have consumed all available disk space. Your S 1. Use Ambari UI to determine which node is running out of disk space. -1. Determine which folder in the troubling node contributes to most of the disk space. SSH to the node first, then run `df` to list disk usage for all mounts. Usually it is `/mnt` which is a temp disk used by OSS. 
You can enter into a folder, then type `sudo du -hs` to show summarized file sizes under a folder. If you see a folder similar to `/mnt/resource/hadoop/yarn/local/usercache/livy/appcache/application_1537280705629_0007`, this means the application is still running. This could be due to RDD persistence or intermediate shuffle files. +1. Determine which folder in the troubling node contributes to most of the disk space. SSH to the node first, then run `df` to list disk usage for all mounts. Usually it's `/mnt` that is a temp disk used by OSS. You can enter into a folder, then type `sudo du -hs` to show summarized file sizes under a folder. If you see a folder similar to `/mnt/resource/hadoop/yarn/local/usercache/livy/appcache/application_1537280705629_0007`, this output means the application is still running. This output could be due to RDD persistence or intermediate shuffle files. 1. To mitigate the issue, kill the application, which will release disk space used by that application. @@ -32,15 +32,15 @@ Apache Yarn application cache may have consumed all available disk space. Your S Open the Ambari UI Navigate to YARN --> Configs --> Advanced. - Add the following 2 properties to the custom yarn-site.xml section and save: + Add the following two properties to the custom yarn-site.xml section and save: ``` yarn.nodemanager.localizer.cache.target-size-mb=2048 yarn.nodemanager.localizer.cache.cleanup.interval-ms=300000 ``` -1. If the above does not permanently fix the issue, optimize your application. +1. If the above doesn't permanently fix the issue, optimize your application. 
## Next steps -[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] \ No newline at end of file +[!INCLUDE [troubleshooting next steps](../includes/hdinsight-troubleshooting-next-steps.md)] diff --git a/articles/hdinsight/hbase/apache-hbase-backup-replication.md b/articles/hdinsight/hbase/apache-hbase-backup-replication.md index 25f019b89d5c..eed5c3c66416 100644 --- a/articles/hdinsight/hbase/apache-hbase-backup-replication.md +++ b/articles/hdinsight/hbase/apache-hbase-backup-replication.md @@ -4,7 +4,7 @@ description: Set up Backup and replication for Apache HBase and Apache Phoenix i ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 12/19/2019 +ms.date: 05/30/2022 --- # Set up backup and replication for Apache HBase and Apache Phoenix on HDInsight diff --git a/articles/hdinsight/hbase/apache-hbase-phoenix-psql.md b/articles/hdinsight/hbase/apache-hbase-phoenix-psql.md index c2834fdcd745..6034407bacde 100644 --- a/articles/hdinsight/hbase/apache-hbase-phoenix-psql.md +++ b/articles/hdinsight/hbase/apache-hbase-phoenix-psql.md @@ -4,7 +4,7 @@ description: Use the psql tool to load bulk load data into Apache Phoenix tables ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 12/17/2019 +ms.date: 05/30/2022 --- # Bulk load data into Apache Phoenix using psql diff --git a/articles/hdinsight/hbase/hbase-troubleshoot-bindexception-address-use.md b/articles/hdinsight/hbase/hbase-troubleshoot-bindexception-address-use.md index 89ddf58787c9..506cc5194317 100644 --- a/articles/hdinsight/hbase/hbase-troubleshoot-bindexception-address-use.md +++ b/articles/hdinsight/hbase/hbase-troubleshoot-bindexception-address-use.md @@ -3,7 +3,7 @@ title: BindException - Address already in use in Azure HDInsight description: BindException - Address already in use in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 08/16/2019 +ms.date: 05/30/2022 --- # Scenario: BindException - 
Address already in use in Azure HDInsight diff --git a/articles/hdinsight/hdinsight-hadoop-compare-storage-options.md b/articles/hdinsight/hdinsight-hadoop-compare-storage-options.md index ba3bcf5c91a5..de96145ba571 100644 --- a/articles/hdinsight/hdinsight-hadoop-compare-storage-options.md +++ b/articles/hdinsight/hdinsight-hadoop-compare-storage-options.md @@ -4,7 +4,7 @@ description: Provides an overview of storage types and how they work with Azure ms.service: hdinsight ms.topic: conceptual ms.custom: seoapr2020 -ms.date: 04/21/2020 +ms.date: 05/30/2022 --- # Compare storage options for use with Azure HDInsight clusters diff --git a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-portal.md b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-portal.md index 21c70dfa8fb7..0dced2fca7e1 100644 --- a/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-portal.md +++ b/articles/hdinsight/hdinsight-hadoop-create-linux-clusters-portal.md @@ -4,7 +4,7 @@ description: Learn to create Apache Hadoop, Apache HBase, Apache Storm, or Apach ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,seoapr2020 -ms.date: 08/06/2020 +ms.date: 05/31/2022 --- # Create Linux-based clusters in HDInsight by using the Azure portal @@ -153,4 +153,4 @@ You've successfully created an HDInsight cluster. 
Now learn how to work with you * [Use Apache Hive with HDInsight](hadoop/hdinsight-use-hive.md) * [Get started with Apache HBase on HDInsight](hbase/apache-hbase-tutorial-get-started-linux.md) -* [Customize Linux-based HDInsight clusters by using script actions](hdinsight-hadoop-customize-cluster-linux.md) \ No newline at end of file +* [Customize Linux-based HDInsight clusters by using script actions](hdinsight-hadoop-customize-cluster-linux.md) diff --git a/articles/hdinsight/hdinsight-hadoop-customize-cluster-bootstrap.md b/articles/hdinsight/hdinsight-hadoop-customize-cluster-bootstrap.md index 052337d16266..f89a2beb2c57 100644 --- a/articles/hdinsight/hdinsight-hadoop-customize-cluster-bootstrap.md +++ b/articles/hdinsight/hdinsight-hadoop-customize-cluster-bootstrap.md @@ -4,7 +4,7 @@ description: Learn how to customize HDInsight cluster configuration programmatic ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive, devx-track-azurepowershell -ms.date: 04/01/2020 +ms.date: 05/31/2022 --- # Customize HDInsight clusters using Bootstrap diff --git a/articles/hdinsight/hdinsight-sdk-java-samples.md b/articles/hdinsight/hdinsight-sdk-java-samples.md index c8560405a686..785ba3954ba8 100644 --- a/articles/hdinsight/hdinsight-sdk-java-samples.md +++ b/articles/hdinsight/hdinsight-sdk-java-samples.md @@ -4,7 +4,7 @@ description: Find Java examples on GitHub for common tasks using the HDInsight S ms.custom: devx-track-java ms.service: hdinsight ms.topic: sample -ms.date: 11/29/2019 +ms.date: 05/30/2022 --- # Azure HDInsight: Java samples @@ -37,4 +37,4 @@ You can get these samples for Java by cloning the [hdinsight-java-sdk-samples](h [!INCLUDE [hdinsight-sdk-additional-functionality](includes/hdinsight-sdk-additional-functionality.md)] -Code snippets for this additional SDK functionality can be found in the [HDInsight SDK for Java reference documentation](/java/api/overview/azure/hdinsight). 
\ No newline at end of file +Code snippets for this additional SDK functionality can be found in the [HDInsight SDK for Java reference documentation](/java/api/overview/azure/hdinsight). diff --git a/articles/hdinsight/hdinsight-sdk-python-samples.md b/articles/hdinsight/hdinsight-sdk-python-samples.md index 8c4d23dea1aa..d9adef3ba18f 100644 --- a/articles/hdinsight/hdinsight-sdk-python-samples.md +++ b/articles/hdinsight/hdinsight-sdk-python-samples.md @@ -3,7 +3,7 @@ title: 'Azure HDInsight: Python samples' description: Find Python examples on GitHub for common tasks using the HDInsight SDK for Python. ms.service: hdinsight ms.topic: sample -ms.date: 11/08/2019 +ms.date: 05/30/2022 ms.custom: devx-track-python --- @@ -40,4 +40,4 @@ You can get these samples for Python by cloning the [hdinsight-python-sdk-sample [!INCLUDE [hdinsight-sdk-additional-functionality](includes/hdinsight-sdk-additional-functionality.md)] -Code snippets for this additional SDK functionality can be found in the [HDInsight SDK for Python reference documentation](/python/api/overview/azure/hdinsight). \ No newline at end of file +Code snippets for this additional SDK functionality can be found in the [HDInsight SDK for Python reference documentation](/python/api/overview/azure/hdinsight). 
diff --git a/articles/hdinsight/interactive-query/hive-migration-across-storage-accounts.md b/articles/hdinsight/interactive-query/hive-migration-across-storage-accounts.md index d39af56edeb3..a7b958718e05 100644 --- a/articles/hdinsight/interactive-query/hive-migration-across-storage-accounts.md +++ b/articles/hdinsight/interactive-query/hive-migration-across-storage-accounts.md @@ -6,7 +6,7 @@ ms.author: kevx ms.reviewer: ms.service: hdinsight ms.topic: how-to -ms.date: 12/11/2020 +ms.date: 05/26/2022 --- # Hive workload migration to new account in Azure Storage diff --git a/articles/hdinsight/kafka/apache-kafka-connect-vpn-gateway.md b/articles/hdinsight/kafka/apache-kafka-connect-vpn-gateway.md index 6136eed77067..e1ad13358b0e 100644 --- a/articles/hdinsight/kafka/apache-kafka-connect-vpn-gateway.md +++ b/articles/hdinsight/kafka/apache-kafka-connect-vpn-gateway.md @@ -4,7 +4,7 @@ description: Learn how to directly connect to Kafka on HDInsight through an Azur ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive, devx-track-python -ms.date: 03/04/2020 +ms.date: 05/30/2022 --- # Connect to Apache Kafka on HDInsight through an Azure Virtual Network diff --git a/articles/hdinsight/overview-azure-storage.md b/articles/hdinsight/overview-azure-storage.md index 4ad20df2154b..a94e303081e6 100644 --- a/articles/hdinsight/overview-azure-storage.md +++ b/articles/hdinsight/overview-azure-storage.md @@ -4,7 +4,7 @@ description: Overview of Azure Storage in HDInsight. 
ms.service: hdinsight ms.topic: conceptual ms.custom: seoapr2020 -ms.date: 04/21/2020 +ms.date: 05/30/2022 --- # Azure Storage overview in HDInsight @@ -80,4 +80,4 @@ Certain MapReduce jobs and packages might create intermediate results that you w - [Introduction to Azure Storage](../storage/common/storage-introduction.md) - [Azure Data Lake Storage Gen1 overview](./overview-data-lake-storage-gen1.md) - [Use Azure storage with Azure HDInsight clusters](hdinsight-hadoop-use-blob-storage.md) -- [Use Azure Data Lake Storage Gen2 with Azure HDInsight clusters](hdinsight-hadoop-use-data-lake-storage-gen2.md) \ No newline at end of file +- [Use Azure Data Lake Storage Gen2 with Azure HDInsight clusters](hdinsight-hadoop-use-data-lake-storage-gen2.md) diff --git a/articles/hdinsight/spark/apache-spark-eclipse-tool-plugin.md b/articles/hdinsight/spark/apache-spark-eclipse-tool-plugin.md index f68c4ca17bfb..031a1ae16743 100644 --- a/articles/hdinsight/spark/apache-spark-eclipse-tool-plugin.md +++ b/articles/hdinsight/spark/apache-spark-eclipse-tool-plugin.md @@ -4,7 +4,7 @@ description: Use HDInsight Tools in Azure Toolkit for Eclipse to develop Spark a ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 12/13/2019 +ms.date: 05/30/2022 --- # Use Azure Toolkit for Eclipse to create Apache Spark applications for an HDInsight cluster @@ -348,4 +348,4 @@ There are two modes to submit the jobs. 
If storage credential is provided, batch ### Managing resources * [Manage resources for the Apache Spark cluster in Azure HDInsight](apache-spark-resource-manager.md) -* [Track and debug jobs running on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) \ No newline at end of file +* [Track and debug jobs running on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) diff --git a/articles/hdinsight/spark/apache-spark-intellij-tool-debug-remotely-through-ssh.md b/articles/hdinsight/spark/apache-spark-intellij-tool-debug-remotely-through-ssh.md index cd0f899fba29..b8c6d65da9a5 100644 --- a/articles/hdinsight/spark/apache-spark-intellij-tool-debug-remotely-through-ssh.md +++ b/articles/hdinsight/spark/apache-spark-intellij-tool-debug-remotely-through-ssh.md @@ -4,7 +4,7 @@ description: Step-by-step guidance on how to use HDInsight Tools in Azure Toolki ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,hdiseo17may2017 -ms.date: 12/23/2019 +ms.date: 05/30/2022 --- # Debug Apache Spark applications on an HDInsight cluster with Azure Toolkit for IntelliJ through SSH @@ -172,4 +172,4 @@ This article provides step-by-step guidance on how to use HDInsight Tools in [Az ### Manage resources * [Manage resources for the Apache Spark cluster in Azure HDInsight](apache-spark-resource-manager.md) -* [Track and debug jobs running on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) \ No newline at end of file +* [Track and debug jobs running on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) diff --git a/articles/hdinsight/spark/apache-spark-intellij-tool-plugin-debug-jobs-remotely.md b/articles/hdinsight/spark/apache-spark-intellij-tool-plugin-debug-jobs-remotely.md index 5b9a7d4fd65a..a9db399bc9b5 100644 --- a/articles/hdinsight/spark/apache-spark-intellij-tool-plugin-debug-jobs-remotely.md +++ b/articles/hdinsight/spark/apache-spark-intellij-tool-plugin-debug-jobs-remotely.md @@ -4,7 +4,7 
@@ description: Learn how to use HDInsight Tools in Azure Toolkit for IntelliJ to r ms.service: hdinsight ms.custom: hdinsightactive ms.topic: how-to -ms.date: 11/28/2017 +ms.date: 05/30/2022 --- # Use Azure Toolkit for IntelliJ to debug Apache Spark applications remotely in HDInsight through VPN @@ -320,4 +320,4 @@ We recommend that you also create an Apache Spark cluster in Azure HDInsight tha ### Manage resources * [Manage resources for the Apache Spark cluster in Azure HDInsight](apache-spark-resource-manager.md) -* [Track and debug jobs that run on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) \ No newline at end of file +* [Track and debug jobs that run on an Apache Spark cluster in HDInsight](apache-spark-job-debugging.md) diff --git a/articles/hdinsight/spark/apache-spark-settings.md b/articles/hdinsight/spark/apache-spark-settings.md index cf11a254afa0..5d9a26ac3297 100644 --- a/articles/hdinsight/spark/apache-spark-settings.md +++ b/articles/hdinsight/spark/apache-spark-settings.md @@ -4,7 +4,7 @@ description: How to view and configure Apache Spark settings for an Azure HDInsi ms.service: hdinsight ms.topic: conceptual ms.custom: hdinsightactive,seoapr2020 -ms.date: 04/24/2020 +ms.date: 05/30/2022 --- # Configure Apache Spark settings diff --git a/articles/healthcare-apis/dicom/media/dicom-add-api-permissions.png b/articles/healthcare-apis/dicom/media/dicom-add-api-permissions.png deleted file mode 100644 index 5c1090a29e80..000000000000 Binary files a/articles/healthcare-apis/dicom/media/dicom-add-api-permissions.png and /dev/null differ diff --git a/articles/healthcare-apis/dicom/media/dicom-add-apis-permissions.png b/articles/healthcare-apis/dicom/media/dicom-add-apis-permissions.png new file mode 100644 index 000000000000..41a974daa393 Binary files /dev/null and b/articles/healthcare-apis/dicom/media/dicom-add-apis-permissions.png differ diff --git a/articles/healthcare-apis/dicom/media/dicom-search-apis-permissions.png 
b/articles/healthcare-apis/dicom/media/dicom-search-apis-permissions.png index 3a2d7531f4eb..0adb86986d84 100644 Binary files a/articles/healthcare-apis/dicom/media/dicom-search-apis-permissions.png and b/articles/healthcare-apis/dicom/media/dicom-search-apis-permissions.png differ diff --git a/articles/healthcare-apis/dicom/media/dicom-select-scopes.png b/articles/healthcare-apis/dicom/media/dicom-select-scopes.png index 5bd0f9cdbb09..1ce88f4c21b5 100644 Binary files a/articles/healthcare-apis/dicom/media/dicom-select-scopes.png and b/articles/healthcare-apis/dicom/media/dicom-select-scopes.png differ diff --git a/articles/healthcare-apis/register-application.md b/articles/healthcare-apis/register-application.md index 5505bc48076a..1f8c05bb2916 100644 --- a/articles/healthcare-apis/register-application.md +++ b/articles/healthcare-apis/register-application.md @@ -5,7 +5,7 @@ services: healthcare-apis author: ginalee-dotcom ms.service: healthcare-apis ms.topic: tutorial -ms.date: 05/03/2022 +ms.date: 05/27/2022 ms.author: mikaelw --- @@ -71,15 +71,15 @@ The following steps are required for the DICOM service, but optional for the FHI 1. Select the **API permissions** blade. - [ ![Add API permissions](dicom/media/dicom-add-api-permissions.png) ](dicom/media/dicom-add-api-permissions.png#lightbox) + [ ![Add API permissions](dicom/media/dicom-add-apis-permissions.png) ](dicom/media/dicom-add-apis-permissions.png#lightbox) 2. Select **Add a permission**. - If you're using Azure Health Data Services, you'll add a permission to the DICOM service by searching for **Azure API for DICOM** under **APIs my organization** uses. + If you're using Azure Health Data Services, you'll add a permission to the DICOM service by searching for **Azure Healthcare APIs** under **APIs my organization** uses. 
[ ![Search API permissions](dicom/media/dicom-search-apis-permissions.png) ](dicom/media/dicom-search-apis-permissions.png#lightbox) - The search result for Azure API for DICOM will only return if you've already deployed the DICOM service in the workspace. + The search result for Azure Healthcare APIs will only return if you've already deployed the DICOM service in the workspace. If you're referencing a different resource application, select your DICOM API Resource Application Registration that you created previously under **APIs my organization**. @@ -88,7 +88,7 @@ The following steps are required for the DICOM service, but optional for the FHI [ ![Select permissions scopes.](dicom/media/dicom-select-scopes.png) ](dicom/media/dicom-select-scopes.png#lightbox) >[!NOTE] ->Use grant_type of client_credentials when trying to otain an access token for the FHIR service using tools such as Postman or REST Client. For more details, visit [Access using Postman](./fhir/use-postman.md) and [Accessing Azure Health Data Services using the REST Client Extension in Visual Studio Code](./fhir/using-rest-client.md). +>Use grant_type of client_credentials when trying to obtain an access token for the FHIR service using tools such as Postman or REST Client. For more details, visit [Access using Postman](./fhir/use-postman.md) and [Accessing Azure Health Data Services using the REST Client Extension in Visual Studio Code](./fhir/using-rest-client.md). >>Use grant_type of client_credentials or authentication_doe when trying to obtain an access token for the DICOM service. For more details, visit [Using DICOM with cURL](dicom/dicomweb-standard-apis-curl.md). Your application registration is now complete. 
diff --git a/articles/hpc-cache/hpc-cache-security-info.md b/articles/hpc-cache/hpc-cache-security-info.md index 2a0a6d6d51a8..612514453737 100644 --- a/articles/hpc-cache/hpc-cache-security-info.md +++ b/articles/hpc-cache/hpc-cache-security-info.md @@ -16,7 +16,7 @@ This security information applies to Microsoft Azure HPC Cache. It addresses com The HPC Cache Service is only accessible through your private virtual network. Microsoft cannot access your virtual network. -Learn more about [connecting private networks](/security/benchmark/azure/baselines/hpc-cache-security-baseline.md). +Learn more about [connecting private networks](/security/benchmark/azure/baselines/hpc-cache-security-baseline). ## Network infrastructure requirements @@ -48,4 +48,4 @@ You can also optionally configure network security groups (NSGs) to control inbo ## Next steps -* Review [Azure HPC Cache security baseline](/security/benchmark/azure/baselines/hpc-cache-security-baseline.md). +* Review [Azure HPC Cache security baseline](/security/benchmark/azure/baselines/hpc-cache-security-baseline). diff --git a/articles/hpc-cache/troubleshoot-nas.md b/articles/hpc-cache/troubleshoot-nas.md index 2ffba35b0681..4059ce420f81 100644 --- a/articles/hpc-cache/troubleshoot-nas.md +++ b/articles/hpc-cache/troubleshoot-nas.md @@ -4,7 +4,7 @@ description: Tips to avoid and fix configuration errors and other problems that author: ekpgh ms.service: hpc-cache ms.topic: troubleshooting -ms.date: 05/26/2022 +ms.date: 05/27/2022 ms.author: v-erinkelly --- @@ -49,7 +49,7 @@ Check these settings both on the NAS itself and also on any firewalls between th ## Check root squash settings -Root squash settings can disrupt file access if they are improperly configured. You should check that the settings on each storage export and on the matching HPC Cache client access policies are consistent. +Root squash settings can disrupt file access if they are improperly configured. 
You should check that the settings on each storage export and on the matching HPC Cache client access policies are appropriate. Root squash prevents requests sent by a local superuser root on the client from being sent to a back-end storage system as root. It reassigns requests from root to a non-privileged user ID (UID) like 'nobody'. @@ -65,14 +65,14 @@ Root squash can be configured in an HPC Cache system in these places: * At the storage export - You can configure your storage system to reassign incoming requests from root to a non-privileged user ID (UID). -These two settings should match. That is, if a storage system export squashes root, you should change its HPC Cache client access rule to also squash root. If the settings don't match, you can have access problems when you try to read or write to the back-end storage system through the HPC Cache. +If your storage system export squashes root, you should update the HPC Cache client access rule for that storage target to also squash root. If not, you can have access problems when you try to read or write to the back-end storage system through the HPC Cache. -This table illustrates the behavior for different root squash scenarios when a client request is sent as UID 0 (root). The scenarios marked with * are ***not recommended*** because they can cause access problems. +This table illustrates the behavior for different root squash scenarios when a client request is sent as UID 0 (root). The scenario marked with * is ***not recommended*** because it can cause access problems. 
| Setting | UID sent from client | UID sent from HPC Cache | Effective UID on back-end storage | |--|--|--|--| | no root squash | 0 (root) | 0 (root) | 0 (root) | -| *root squash at HPC Cache only | 0 (root) | 65534 (nobody) | 65534 (nobody) | +| root squash at HPC Cache only | 0 (root) | 65534 (nobody) | 65534 (nobody) | | *root squash at NAS storage only | 0 (root) | 0 (root) | 65534 (nobody) | | root squash at HPC Cache and NAS | 0 (root) | 65534 (nobody) | 65534 (nobody) | @@ -80,6 +80,7 @@ This table illustrates the behavior for different root squash scenarios when a c ## Check access on directory paths + For NAS systems that export hierarchical directories, check that Azure HPC Cache has appropriate access to each export level in the path to the files you are using. diff --git a/articles/index.yml b/articles/index.yml index d7cf890d8836..460f2c3347ac 100644 --- a/articles/index.yml +++ b/articles/index.yml @@ -287,7 +287,7 @@ productDirectory: azureCategories: - ai-machine-learning - media - url: /azure-video-indexer/index.yml + url: /azure/azure-video-indexer/ # Card - title: Azure Analysis Services summary: Enterprise-grade analytics engine as a service diff --git a/articles/iot-central/TOC.yml b/articles/iot-central/TOC.yml index 0d9987a3ea4c..6b012afbab8f 100644 --- a/articles/iot-central/TOC.yml +++ b/articles/iot-central/TOC.yml @@ -222,6 +222,9 @@ href: core/howto-manage-jobs-with-rest-api.md - name: Manage applications href: core/howto-manage-iot-central-with-rest-api.md + - name: File upload + href: core/howto-upload-file-rest-api.md + - name: Secure and administer your application items: diff --git a/articles/iot-central/core/howto-create-custom-analytics.md b/articles/iot-central/core/howto-create-custom-analytics.md index fc24bcd06a7c..1c69ca606db2 100644 --- a/articles/iot-central/core/howto-create-custom-analytics.md +++ b/articles/iot-central/core/howto-create-custom-analytics.md @@ -26,81 +26,75 @@ In this how-to guide, you learn how to: ## 
Prerequisites -To complete the steps in this how-to guide, you need an active Azure subscription. +[!INCLUDE [azure-cli-prepare-your-environment-no-header](../../../includes/azure-cli-prepare-your-environment-no-header.md)] -If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. +## Run the Script -### IoT Central application +The below script will create an IoT Central application, Event Hubs namespace, and Databricks workspace in a resource group called `eventhubsrg`. -Create an IoT Central application on the [Azure IoT Central application manager](https://aka.ms/iotcentral) website with the following settings: +```azurecli-interactive -| Setting | Value | -| ------- | ----- | -| Pricing plan | Standard | -| Application template | In-store analytics – condition monitoring | -| Application name | Accept the default or choose your own name | -| URL | Accept the default or choose your own unique URL prefix | -| Directory | Your Azure Active Directory tenant | -| Azure subscription | Your Azure subscription | -| Region | Your nearest region | +# A unique name for the Event Hub Namespace. +eventhubnamespace="your-event-hubs-name-data-bricks" -The examples and screenshots in this article use the **United States** region. Choose a location close to you and make sure you create all your resources in the same region. +# A unique name for the IoT Central application. +iotcentralapplicationname="your-app-name-data-bricks" -This application template includes two simulated thermostat devices that send telemetry. +# A unique name for the Databricks workspace. +databricksworkspace="your-databricks-name-data-bricks" -### Resource group +# Name for the Resource group. +resourcegroup=eventhubsrg -Use the [Azure portal to create a resource group](https://portal.azure.com/#create/Microsoft.ResourceGroup) called **IoTCentralAnalysis** to contain the other resources you create. 
Create your Azure resources in the same location as your IoT Central application. +eventhub=centralexport +location=eastus +authrule=ListenSend -### Event Hubs namespace -Use the [Azure portal to create an Event Hubs namespace](https://portal.azure.com/#create/Microsoft.EventHub) with the following settings: +#Create a resource group for the IoT Central application. +RESOURCE_GROUP=$(az group create --name $resourcegroup --location $location) -| Setting | Value | -| ------- | ----- | -| Name | Choose your namespace name | -| Pricing tier | Basic | -| Subscription | Your subscription | -| Resource group | IoTCentralAnalysis | -| Location | East US | -| Throughput Units | 1 | +# Create an IoT Central application +IOT_CENTRAL=$(az iot central app create -n $iotcentralapplicationname -g $resourcegroup -s $iotcentralapplicationname -l $location --mi-system-assigned) -### Azure Databricks workspace -Use the [Azure portal to create an Azure Databricks Service](https://portal.azure.com/#create/Microsoft.Databricks) with the following settings: +# Create an Event Hubs namespace. 
+az eventhubs namespace create --name $eventhubnamespace --resource-group $resourcegroup -l $location + +# Create an Azure Databricks workspace +DATABRICKS_JSON=$(az databricks workspace create --resource-group $resourcegroupname --name $databricksworkspace --location $location --sku standard) + + +# Create an Event Hub +az eventhubs eventhub create --name $eventhub --resource-group $resourcegroupname --namespace-name $eventhubnamespace -| Setting | Value | -| ------- | ----- | -| Workspace name | Choose your workspace name | -| Subscription | Your subscription | -| Resource group | IoTCentralAnalysis | -| Location | East US | -| Pricing Tier | Standard | -When you've created the required resources, your **IoTCentralAnalysis** resource group looks like the following screenshot: +# Configure the managed identity for your IoT Central application +# with permissions to send data to an event hub in the resource group. +MANAGED_IDENTITY=$(az iot central app identity show --name $iotcentralapplicationname \ + --resource-group $resourcegroup) +az role assignment create --assignee $(jq -r .principalId <<< $MANAGED_IDENTITY) --role 'Azure Event Hubs Data Sender' --scope $(jq -r .id <<< $RESOURCE_GROUP) -:::image type="content" source="media/howto-create-custom-analytics/resource-group.png" alt-text="image of IoT Central analysis resource group."::: -## Create an event hub +# Create a connection string to use in Databricks notebook +az eventhubs eventhub authorization-rule create --eventhub-name $eh --namespace-name $ehns --resource-group $rg --name $authrule --rights Listen Send +EHAUTH_JSON=$(az eventhubs eventhub authorization-rule keys list --resource-group $rg --namespace-name $ehns --eventhub-name $eh --name $authrule) -You can configure an IoT Central application to continuously export telemetry to an event hub. In this section, you create an event hub to receive telemetry from your IoT Central application. 
The event hub delivers the telemetry to your Stream Analytics job for processing. +# Details of your IoT Central application, databricks workspace, and event hub connection string -1. In the Azure portal, navigate to your Event Hubs namespace and select **+ Event Hub**. -1. Name your event hub **centralexport**. -1. In the list of event hubs in your namespace, select **centralexport**. Then choose **Shared access policies**. -1. Select **+ Add**. Create a policy named **SendListen** with the **Send** and **Listen** claims. -1. When the policy is ready, select it in the list, and then copy the **Connection string-primary key** value. -1. Make a note of this connection string, you use it later when you configure your Databricks notebook to read from the event hub. +echo "Your IoT Central app: https://$iotcentralapplicationname.azureiotcentral.com/" +echo "Your Databricks workspace: https://$(jq -r .workspaceUrl <<< $DATABRICKS_JSON)" +echo "Your event hub connection string is: $(jq -r .primaryConnectionString <<< EHAUTH_JSON)" -Your Event Hubs namespace looks like the following screenshot: +``` -:::image type="content" source="media/howto-create-custom-analytics/event-hubs-namespace.png" alt-text="image of Event Hubs namespace."::: +Make a note of the three values output by the script, you need them in the following steps. ## Configure export in IoT Central In this section, you configure the application to stream telemetry from its simulated devices to your event hub. -On the [Azure IoT Central application manager](https://aka.ms/iotcentral) website, navigate to the IoT Central application you created previously. To configure the export, first create a destination: +Use the URL output by the script to navigate to the IoT Central application it created. 1. Navigate to the **Data export** page, then select **Destinations**. 1. Select **+ New destination**. 
@@ -110,9 +104,9 @@ On the [Azure IoT Central application manager](https://aka.ms/iotcentral) websit | ----- | ----- | | Destination name | Telemetry event hub | | Destination type | Azure Event Hubs | - | Connection string | The event hub connection string you made a note of previously | - - The **Event Hub** shows as **centralexport**. + | Authorization | System-assigned managed identity | + | Host name | The event hub namespace host name, it's the value you assigned to `eventhubnamespace` in the earlier script | + | Event Hub | The event hub name, it's the value you assigned to `eventhub` in the earlier script | :::image type="content" source="media/howto-create-custom-analytics/data-export-1.png" alt-text="Screenshot showing data export destination."::: @@ -137,13 +131,35 @@ To create the export definition: Wait until the export status is **Healthy** on the **Data export** page before you continue. +## Create a device template + +To add a device template for the MXChip device: + +1. Select **+ New** on the **Device templates** page. +1. On the **Select type** page, scroll down until you find the **MXCHIP AZ3166** tile in the **Featured device templates** section. +1. Select the **MXCHIP AZ3166** tile, and then select **Next: Review**. +1. On the **Review** page, select **Create**. + +## Add a device + +To add a simulated device to your Azure IoT Central application: + +1. Choose **Devices** on the left pane. +1. Choose the **MXCHIP AZ3166** device template from which you created. +1. Choose + **New**. +1. Enter a device name and ID or accept the default. The maximum length of a device name is 148 characters. The maximum length of a device ID is 128 characters. +1. Turn the **Simulated** toggle to **On**. +1. Select **Create**. + +Repeat these steps to add two more simulated MXChip devices to your application. + ## Configure Databricks workspace -In the Azure portal, navigate to your Azure Databricks service and select **Launch Workspace**. 
A new tab opens in your browser and signs you in to your workspace. +Use the URL output by the script to navigate to the Databricks workspace it created. ### Create a cluster -On the **Azure Databricks** page, under the list of common tasks, select **New Cluster**. +Navigate to **Create** page in your Databricks environment. Select the **+ Cluster**. Use the information in the following table to create your cluster: @@ -151,8 +167,7 @@ Use the information in the following table to create your cluster: | ------- | ----- | | Cluster Name | centralanalysis | | Cluster Mode | Standard | -| Databricks Runtime Version | 5.5 LTS (Scala 2.11, Spark 2.4.5) | -| Python Version | 3 | +| Databricks Runtime Version | Runtime: 10.4 LTS (Scala 2.12, Spark 3.2.1) | | Enable Autoscaling | No | | Terminate after minutes of inactivity | 30 | | Worker Type | Standard_DS3_v2 | @@ -187,7 +202,9 @@ The following steps show you how to import the library your sample needs into th Use the following steps to import a Databricks notebook that contains the Python code to analyze and visualize your IoT Central telemetry: -1. Navigate to the **Workspace** page in your Databricks environment. Select the dropdown next to your account name and then choose **Import**. +1. Navigate to the **Workspace** page in your Databricks environment. Select the dropdown from the workspace and then choose **Import**. + + :::image type="content" source="media/howto-create-custom-analytics/databricks-import.png" alt-text="Screenshot of data bricks import."::: 1. Choose to import from a URL and enter the following address: [https://github.com/Azure-Samples/iot-central-docs-samples/blob/master/databricks/IoT%20Central%20Analysis.dbc?raw=true](https://github.com/Azure-Samples/iot-central-docs-samples/blob/master/databricks/IoT%20Central%20Analysis.dbc?raw=true) @@ -195,9 +212,9 @@ Use the following steps to import a Databricks notebook that contains the Python 1. 
Select the **Workspace** to view the imported notebook: -:::image type="content" source="media/howto-create-custom-analytics/import-notebook.png" alt-text="Screenshot of Imported notebook."::: + :::image type="content" source="media/howto-create-custom-analytics/import-notebook.png" alt-text="Screenshot of Imported notebook."::: -5. Edit the code in the first Python cell to add the Event Hubs connection string you saved previously: +1. Use the connection string output by the script to edit the code in the first Python cell to add the Event Hubs connection string: ```python from pyspark.sql.functions import * @@ -221,7 +238,7 @@ You may see an error in the last cell. If so, check the previous cells are runni ### View smoothed data -In the notebook, scroll down to cell 14 to see a plot of the rolling average humidity by device type. This plot continuously updates as streaming telemetry arrives: +In the notebook, scroll down to see a plot of the rolling average humidity by device type. This plot continuously updates as streaming telemetry arrives: :::image type="content" source="media/howto-create-custom-analytics/telemetry-plot.png" alt-text="Screenshot of Smoothed telemetry plot."::: @@ -229,7 +246,7 @@ You can resize the chart in the notebook. ### View box plots -In the notebook, scroll down to cell 20 to see the [box plots](https://en.wikipedia.org/wiki/Box_plot). The box plots are based on static data so to update them you must rerun the cell: +In the notebook, scroll down to see the [box plots](https://en.wikipedia.org/wiki/Box_plot). The box plots are based on static data so to update them you must rerun the cell: :::image type="content" source="media/howto-create-custom-analytics/box-plots.png" alt-text="Screenshot of box plots."::: @@ -237,9 +254,11 @@ You can resize the plots in the notebook. ## Tidy up -To tidy up after this how-to and avoid unnecessary costs, delete the **IoTCentralAnalysis** resource group in the Azure portal. 
+To tidy up after this how-to and avoid unnecessary costs, you can run the following command to delete the resource group: -You can delete the IoT Central application from the **Management** page within the application. +```azurecli-interactive +az group delete -n eventhubsrg +``` ## Next steps diff --git a/articles/iot-central/core/howto-upload-file-rest-api.md b/articles/iot-central/core/howto-upload-file-rest-api.md new file mode 100644 index 000000000000..a83cb35dc40c --- /dev/null +++ b/articles/iot-central/core/howto-upload-file-rest-api.md @@ -0,0 +1,322 @@ +--- +title: Use the REST API to add upload storage account configuration in Azure IoT Central +description: How to use the IoT Central REST API to add upload storage account configuration in an application +author: v-krishnag +ms.author: v-krishnag +ms.date: 05/12/2022 +ms.topic: how-to +ms.service: iot-central +services: iot-central + +--- + +# How to use the IoT Central REST API to upload a file + +IoT Central lets you upload media and other files from connected devices to cloud storage. You configure the file upload capability in your IoT Central application, and then implement file uploads in your device code. In this article, learn how to: + +* Use the REST API to configure the file upload capability in your IoT Central application. +* Test the file upload by running some sample device code. + +The IoT Central REST API lets you: + +* Add a file upload storage account configuration +* Update a file upload storage account configuration +* Get the file upload storage account configuration +* Delete the file upload storage configuration + +Every IoT Central REST API call requires an authorization header. To learn more, see [How to authenticate and authorize IoT Central REST API calls](howto-authorize-rest-api.md). + +For the reference documentation for the IoT Central REST API, see [Azure IoT Central REST API reference](/rest/api/iotcentral/). 
+ +[!INCLUDE [iot-central-postman-collection](../../../includes/iot-central-postman-collection.md)] + +## Prerequisites + +To test the file upload, install the following prerequisites in your local development environment: + +* [Node.js](https://nodejs.org/en/download/) +* [Visual Studio Code](https://code.visualstudio.com/Download) + +## Add a file upload storage account configuration + +### Create a storage account + +To use the Azure Storage REST API, you need a bearer token for the `management.azure.com` resource. To get a bearer token, you can use the Azure CLI: + +```azurecli +az account get-access-token --resource https://management.azure.com +``` + +If you don't have a storage account for your blobs, you can use the following request to create one in your subscription: + +```http +PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}?api-version=2021-09-01 + +``` + +The request headers have the following fields: + +* `subscriptionId` : The ID of the target subscription. +* `resourceGroupName`: The name of the resource group in your subscription. The name is case insensitive. +* `accountName` : The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only. + +The request body has the following required fields: + +* `kind` : Type of storage account +* `location` : The geo-location where the resource lives +* `sku`: The SKU name. 
+ +```json +{ + "kind": "BlockBlobStorage", + "location": "West US", + "sku": "Premium_LRS" +} +``` + +### Create a container + +Use the following request to create a container called `fileuploads` in your storage account for your blobs: + +```http +PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/fileuploads?api-version=2021-09-01 +``` + +* `containerName` : Blob container names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter or number. + +Send an empty request body with this request that looks like the following example: + +```json +{ +} +``` + +The response to this request looks like the following example: + +```json +{ + "id": "/subscriptions/your-subscription-id/resourceGroups/yourResourceGroupName/providers/Microsoft.Storage/storageAccounts/yourAccountName/blobServices/default/containers/fileuploads", + "name": "fileuploads", + "type": "Microsoft.Storage/storageAccounts/blobServices/containers" +} +``` + +### Get the storage account keys + +Use the following request to retrieve that storage account keys that you need when you configure the upload in IoT Central: + +```http +POST https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys?api-version=2021-09-01 +``` + +The response to this request looks like the following example: + +```json +{ + "keys": [ + { + "creationTime": "2022-05-19T19:22:40.9132287Z", + "keyName": "key1", + "value": "j3UTm**************==", + "permissions": "FULL" + }, + { + "creationTime": "2022-05-19T19:22:40.9132287Z", + "keyName": "key2", + "value": "Nbs3W**************==", + "permissions": "FULL" + } + ] +} +``` + +### Create the upload configuration + +Use the 
following request to create a file upload blob storage account configuration in your IoT Central application: + +```http +PUT https://{your-app-subdomain}.azureiotcentral.com/api/fileUploads?api-version=1.2-preview +``` + +The request body has the following fields: + +* `account`: The storage account name where to upload the file to. +* `connectionString`: The connection string to connect to the storage account. Use one of the `value` values from the previous `listKeys` request as the `AccountKey` value. +* `container`: The name of the container inside the storage account. The following example uses the name `fileuploads`. +* `etag`: ETag to prevent conflict with multiple uploads +* `sasTtl`: ISO 8601 duration standard, The amount of time the device’s request to upload a file is valid before it expires. + +```json +{ + "account": "yourAccountName", + "connectionString": "DefaultEndpointsProtocol=https;AccountName=yourAccountName;AccountKey=*****;BlobEndpoint=https://yourAccountName.blob.core.windows.net/", + "container": "fileuploads", + "sasTtl": "PT1H" +} +``` + +The response to this request looks like the following example: + +```json +{ + "account": "yourAccountName", + "connectionString": "DefaultEndpointsProtocol=https;AccountName=yourAccountName;AccountKey=*****;BlobEndpoint=https://yourAccountName.blob.core.windows.net/", + "container": "fileuploads", + "sasTtl": "PT1H", + "state": "pending", + "etag": "\"7502ac89-0000-0300-0000-627eaf100000\"" + +} + +``` + +## Get the file upload storage account configuration + +Use the following request to retrieve details of a file upload blob storage account configuration in your IoT Central application: + + +```http +GET https://{your-app-subdomain}.azureiotcentral.com/api/fileUploads?api-version=1.2-preview +``` + +The response to this request looks like the following example: + +```json +{ + "account": "yourAccountName", + "connectionString": 
"DefaultEndpointsProtocol=https;AccountName=yourAccountName;AccountKey=*****;BlobEndpoint=https://yourAccountName.blob.core.windows.net/", + "container": "yourContainerName", + "state": "succeeded", + "etag": "\"7502ac89-0000-0300-0000-627eaf100000\"" + +} +``` + +## Update the file upload storage account configuration + +Use the following request to update a file upload blob storage account configuration in your IoT Central application: + +```http +PATCH https://{your-app-subdomain}.azureiotcentral.com/api/fileUploads?api-version=1.2-preview +``` + +```json +{ + "account": "yourAccountName", + "connectionString": "DefaultEndpointsProtocol=https;AccountName=yourAccountName;AccountKey=*****;BlobEndpoint=https://yourAccountName.blob.core.windows.net/", + "container": "yourContainerName2", + "sasTtl": "PT1H" +} +``` + +The response to this request looks like the following example: + +```json + +{ + "account": "yourAccountName", + "connectionString": "DefaultEndpointsProtocol=https;AccountName=yourAccountName;AccountKey=*****;BlobEndpoint=https://yourAccountName.blob.core.windows.net/", + "container": "yourContainerName2", + "sasTtl": "PT1H", + "state": "succeeded", + "etag": "\"7502ac89-0000-0300-0000-627eaf100000\"" +} +``` + +## Remove the file upload storage account configuration + +Use the following request to delete a storage account configuration: + +```http +DELETE https://{your-app-subdomain}.azureiotcentral.com/api/fileUploads?api-version=1.2-preview +``` + +## Test file upload + +After you [configure file uploads](#add-a-file-upload-storage-account-configuration) in your IoT Central application, you can test it with the sample code. 
If you haven't already cloned the file upload sample repository, use the following commands to clone it to a suitable location on your local machine and install the dependent packages: + +``` +git clone https://github.com/azure-Samples/iot-central-file-upload-device +cd iotc-file-upload-device +npm i +npm build +``` + +### Create the device template and import the model + +To test the file upload you run a sample device application. Create a device template for the sample device to use. + +1. Open your application in IoT Central UI. + +1. Navigate to the **Device Templates** tab in the left pane, select **+ New**: + +1. Choose **IoT device** as the template type. + +1. On the **Customize** page of the wizard, enter a name such as *File Upload Device Sample* for the device template. + +1. On the **Review** page, select **Create**. + +1. Select **Import a model** and upload the *FileUploadDeviceDcm.json* manifest file from the folder `iotc-file-upload-device\setup` in the repository you downloaded previously. + +1. Select **Publish** to publish the device template. + +### Add a device + +To add a device to your Azure IoT Central application: + +1. Choose **Devices** on the left pane. + +1. Select the *File Upload Device Sample* device template which you created earlier. + +1. Select + **New** and select **Create**. + +1. Select the device which you created and Select **Connect** + +Copy the values for `ID scope`, `Device ID`, and `Primary key`. You'll use these values in the device sample code. + +### Run the sample code + +Open the git repository you downloaded in VS code. Create an ".env" file at the root of your project and add the values you copied above. The file should look like the sample below with the values you made a note of previously. + +``` +scopeId= +deviceId= +deviceKey= +modelId=dtmi:IoTCentral:IotCentralFileUploadDevice;1 +``` + +Open the git repository you downloaded in VS code. Press F5 to run/debug the sample. 
In your terminal window you see that the device is registered and is connected to IoT Central: + +``` + +Starting IoT Central device... + > Machine: Windows_NT, 8 core, freemem=6674mb, totalmem=16157mb +Starting device registration... +DPS registration succeeded +Connecting the device... +IoT Central successfully connected device: 7z1xo26yd8 +Sending telemetry: { + "TELEMETRY_SYSTEM_HEARTBEAT": 1 +} +Sending telemetry: { + "TELEMETRY_SYSTEM_HEARTBEAT": 1 +} +Sending telemetry: { + "TELEMETRY_SYSTEM_HEARTBEAT": 1 +} + +``` + +The sample project comes with a sample file named *datafile.json*. This is the file that's uploaded when you use the **Upload File** command in your IoT Central application. + +To test this open your application and select the device you created. Select the **Command** tab and you see a button named **Run**. When you select that button the IoT Central app calls a direct method on your device to upload the file. You can see this direct method in the sample code in the /device.ts file. The method is named *uploadFileCommand*. + +Select the **Raw data** tab to verify the file upload status. + +:::image type="content" source="media/howto-upload-file-rest-api/raw-data.png" alt-text="Screenshot showing the U I of how to verify a file upload." border="false"::: + +You can also make a [REST API](/rest/api/storageservices/list-blobs) call to verify the file upload status in the storage container. 
+ +## Next steps + +Now that you've learned how to configure file uploads with the REST API, a suggested next step is to [How to create device templates from IoT Central GUI.](howto-set-up-template.md#create-a-device-template) diff --git a/articles/iot-central/core/howto-use-properties.md b/articles/iot-central/core/howto-use-properties.md index fc11a1c0e36f..03c4f4ca777c 100644 --- a/articles/iot-central/core/howto-use-properties.md +++ b/articles/iot-central/core/howto-use-properties.md @@ -83,11 +83,11 @@ Optional fields, such as display name and description, let you add more details When you create a property, you can specify complex schema types such as **Object** and **Enum**. -![Screenshot that shows how to add a capability.](./media/howto-use-properties/property.png) +:::image type="content" source="media/howto-use-properties/property.png" alt-text="Screenshot that shows how to add a capability."::: When you select the complex **Schema**, such as **Object**, you need to define the object, too. -:::image type="content" source="media/howto-use-properties/object.png" alt-text="Screenshot that shows how to define an object"::: +:::image type="content" source="media/howto-use-properties/object.png" alt-text="Screenshot that shows how to define an object."::: The following code shows the definition of an Object property type. This object has two fields with types string and integer. @@ -161,7 +161,7 @@ This article uses Node.js for simplicity. For other language examples, see the [ The following view in Azure IoT Central application shows the properties you can see. The view automatically makes the **Device model** property a _read-only device property_. 
-:::image type="content" source="media/howto-use-properties/read-only.png" alt-text="Screenshot that shows the view of a read-only property"::: +:::image type="content" source="media/howto-use-properties/read-only.png" alt-text="Screenshot that shows the view of a read-only property."::: ## Implement writable properties @@ -220,13 +220,25 @@ When the operator sets a writable property in the Azure IoT Central application, The following view shows the writable properties. When you enter the value and select **Save**, the initial status is **Pending**. When the device accepts the change, the status changes to **Accepted**. -![Screenshot that shows Pending status.](./media/howto-use-properties/status-pending.png) +:::image type="content" source="media/howto-use-properties/status-pending.png" alt-text="Screenshot that shows Pending status for a property update."::: -![Screenshot that shows Accepted property.](./media/howto-use-properties/accepted.png) +:::image type="content" source="media/howto-use-properties/accepted.png" alt-text="Screenshot that shows Accepted property for a completed update."::: + +## Use properties on unassigned devices + +You can view and update writable properties on a device that isn't assigned to a device template. 
+ +To view existing properties on an unassigned device, navigate to the device in the **Devices** section, select **Manage device**, and then **Device Properties**: + +:::image type="content" source="media/howto-use-properties/view-unassigned-device-properties.png" alt-text="Screenshot that shows properties on an unassigned device."::: + +You can update the writable properties in this view: + +:::image type="content" source="media/howto-use-properties/update-unassigned-device-properties.png" alt-text="Screenshot that shows how to update properties."::: ## Next steps Now that you've learned how to use properties in your Azure IoT Central application, see: * [Payloads](concepts-telemetry-properties-commands.md) -* [Create and connect a client application to your Azure IoT Central application](tutorial-connect-device.md) \ No newline at end of file +* [Create and connect a client application to your Azure IoT Central application](tutorial-connect-device.md) diff --git a/articles/iot-central/core/media/howto-create-custom-analytics/box-plots.png b/articles/iot-central/core/media/howto-create-custom-analytics/box-plots.png index 9d6d40cd4dcb..57af9594bec1 100644 Binary files a/articles/iot-central/core/media/howto-create-custom-analytics/box-plots.png and b/articles/iot-central/core/media/howto-create-custom-analytics/box-plots.png differ diff --git a/articles/iot-central/core/media/howto-create-custom-analytics/cluster-libraries.png b/articles/iot-central/core/media/howto-create-custom-analytics/cluster-libraries.png index 5d98a40bec40..f330fb3d26a2 100644 Binary files a/articles/iot-central/core/media/howto-create-custom-analytics/cluster-libraries.png and b/articles/iot-central/core/media/howto-create-custom-analytics/cluster-libraries.png differ diff --git a/articles/iot-central/core/media/howto-create-custom-analytics/data-export-1.png b/articles/iot-central/core/media/howto-create-custom-analytics/data-export-1.png index 33a229af9472..468e3881ca7e 100644 Binary 
files a/articles/iot-central/core/media/howto-create-custom-analytics/data-export-1.png and b/articles/iot-central/core/media/howto-create-custom-analytics/data-export-1.png differ diff --git a/articles/iot-central/core/media/howto-create-custom-analytics/databricks-import.png b/articles/iot-central/core/media/howto-create-custom-analytics/databricks-import.png new file mode 100644 index 000000000000..75fce72cc6a6 Binary files /dev/null and b/articles/iot-central/core/media/howto-create-custom-analytics/databricks-import.png differ diff --git a/articles/iot-central/core/media/howto-create-custom-analytics/import-notebook.png b/articles/iot-central/core/media/howto-create-custom-analytics/import-notebook.png index 9eaf75934831..1db12ae92a52 100644 Binary files a/articles/iot-central/core/media/howto-create-custom-analytics/import-notebook.png and b/articles/iot-central/core/media/howto-create-custom-analytics/import-notebook.png differ diff --git a/articles/iot-central/core/media/howto-create-custom-analytics/telemetry-plot.png b/articles/iot-central/core/media/howto-create-custom-analytics/telemetry-plot.png index d2b893fa01cc..30124fd568b5 100644 Binary files a/articles/iot-central/core/media/howto-create-custom-analytics/telemetry-plot.png and b/articles/iot-central/core/media/howto-create-custom-analytics/telemetry-plot.png differ diff --git a/articles/iot-central/core/media/howto-upload-file-rest-api/raw-data.png b/articles/iot-central/core/media/howto-upload-file-rest-api/raw-data.png new file mode 100644 index 000000000000..98798de707d7 Binary files /dev/null and b/articles/iot-central/core/media/howto-upload-file-rest-api/raw-data.png differ diff --git a/articles/iot-central/core/media/howto-use-properties/update-unassigned-device-properties.png b/articles/iot-central/core/media/howto-use-properties/update-unassigned-device-properties.png new file mode 100644 index 000000000000..61615cab33fc Binary files /dev/null and 
b/articles/iot-central/core/media/howto-use-properties/update-unassigned-device-properties.png differ diff --git a/articles/iot-central/core/media/howto-use-properties/view-unassigned-device-properties.png b/articles/iot-central/core/media/howto-use-properties/view-unassigned-device-properties.png new file mode 100644 index 000000000000..613397b71a2a Binary files /dev/null and b/articles/iot-central/core/media/howto-use-properties/view-unassigned-device-properties.png differ diff --git a/articles/iot-edge/how-to-visual-studio-develop-module.md b/articles/iot-edge/how-to-visual-studio-develop-module.md index 0d043d0648b0..9ae2c5bf501d 100644 --- a/articles/iot-edge/how-to-visual-studio-develop-module.md +++ b/articles/iot-edge/how-to-visual-studio-develop-module.md @@ -9,46 +9,59 @@ ms.date: 08/24/2021 ms.topic: conceptual ms.service: iot-edge --- -# Use Visual Studio 2019 to develop and debug modules for Azure IoT Edge +# Use Visual Studio 2022 to develop and debug modules for Azure IoT Edge [!INCLUDE [iot-edge-version-all-supported](../../includes/iot-edge-version-all-supported.md)] -This article shows you how to use Visual Studio 2019 to develop and debug Azure IoT Edge modules. +This article shows you how to use Visual Studio 2022 to develop and debug Azure IoT Edge modules. -The Azure IoT Edge Tools for Visual Studio extension provides the following benefits: +The **Azure IoT Edge Tools for Visual Studio** extension provides the following benefits: * Create, edit, build, run, and debug IoT Edge solutions and modules on your local development computer. +* Code your Azure IoT modules in C or C# with the benefits of Visual Studio development. * Deploy your IoT Edge solution to an IoT Edge device via Azure IoT Hub. -* Code your Azure IoT modules in C or C# while having all of the benefits of Visual Studio development. -* Manage IoT Edge devices and modules with UI. +* Manage IoT Edge devices and modules with the UI. 
-This article shows you how to use the Azure IoT Edge Tools for Visual Studio 2019 to develop your IoT Edge modules. You also learn how to deploy your project to an IoT Edge device. Currently, Visual Studio 2019 provides support for modules written in C and C#. The supported device architectures are Windows X64 and Linux X64 or ARM32. For more information about supported operating systems, languages, and architectures, see [Language and architecture support](module-development.md#language-and-architecture-support). +Visual Studio 2022 provides support for modules written in C and C#. The supported device architectures are Windows x64 and Linux x64 or ARM32, while ARM64 is in preview. For more information about supported operating systems, languages, and architectures, see [Language and architecture support](module-development.md#language-and-architecture-support). ## Prerequisites -This article assumes that you use a machine running Windows as your development machine. On Windows computers, you can develop either Windows or Linux modules. +This article assumes that you use a machine running Windows as your development machine. -* To develop modules with **Windows containers**, use a Windows computer running version 1809/build 17763 or newer. -* To develop modules with **Linux containers**, use a Windows computer that meets the [requirements for Docker Desktop](https://docs.docker.com/docker-for-windows/install/#what-to-know-before-you-install). +* On Windows computers, you can develop either Windows or Linux modules. -Install Visual Studio on your development machine. Make sure you include the **Azure development** and **Desktop development with C++** workloads in your Visual Studio 2019 installation. You can [Modify Visual Studio 2019](/visualstudio/install/modify-visual-studio?view=vs-2019&preserve-view=true) to add the required workloads. + * To develop modules with **Windows containers**, use a Windows computer running version 1809/build 17763 or newer. 
+ * To develop modules with **Linux containers**, use a Windows computer that meets the [requirements for Docker Desktop](https://docs.docker.com/docker-for-windows/install/#what-to-know-before-you-install). -After your Visual Studio 2019 is ready, you also need the following tools and components: +* Install Visual Studio on your development machine. Make sure you include the **Azure development** and **Desktop development with C++** workloads in your Visual Studio 2022 installation. Alternatively, you can [Modify Visual Studio 2022](/visualstudio/install/modify-visual-studio?view=vs-2022&preserve-view=true) to add the required workloads, if Visual Studio is already installed on your machine. -* Download and install [Azure IoT Edge Tools](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vs16iotedgetools) from the Visual Studio marketplace to create an IoT Edge project in Visual Studio 2019. +* Install the Azure IoT Edge Tools either from the Marketplace or from Visual Studio: - > [!TIP] - > If you are using Visual Studio 2017, download and install [Azure IoT Edge Tools for VS 2017](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vsiotedgetools) from the Visual Studio marketplace + * Download and install [Azure IoT Edge Tools](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vs17iotedgetools) from the Visual Studio Marketplace. + + > [!TIP] + > If you are using Visual Studio 2019, download and install [Azure IoT Edge Tools for VS 2019](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vs16iotedgetools) from the Visual Studio marketplace + + * Or, in Visual Studio go to **Tools > Get Tools and Features**. The Visual Studio Installer will open. From the **Individual components** tab, select **Azure IoT Edge Tools for VS 2022**, then select **Install** in the lower right of the popup. Close the popup when finished. 
+ + If you only need to update your tools, go to the **Manage Extensions** window, expand **Updates > Visual Studio Marketplace**, select **Azure IoT Edge Tools** then select **Update**. + + After the update is complete, select **Close** and restart Visual Studio. -* Download and install [Docker Community Edition](https://docs.docker.com/install/) on your development machine to build and run your module images. You'll need to set Docker CE to run in either Linux container mode or Windows container mode, depending on the type of modules you are developing. +* Download and install [Docker Community Edition](https://docs.docker.com/install/) on your development machine to build and run your module images. Set Docker CE to run in either Linux container mode or Windows container mode, depending on the type of modules you are developing. -* Set up your local development environment to debug, run, and test your IoT Edge solution by installing the [Azure IoT EdgeHub Dev Tool](https://pypi.org/project/iotedgehubdev/). Install [Python (3.5/3.6/3.7/3.8) and Pip](https://www.python.org/) and then install the **iotedgehubdev** package by running the following command in your terminal. Make sure your Azure IoT EdgeHub Dev Tool version is greater than 0.3.0. +* Set up your local development environment to debug, run, and test your IoT Edge solution by installing the [Azure IoT EdgeHub Dev Tool](https://pypi.org/project/iotedgehubdev/). Install [Python (3.5/3.6/3.7/3.8) and Pip](https://www.python.org/) and then install the **iotedgehubdev** package by running the following command in your terminal. ```cmd pip install --upgrade iotedgehubdev ``` + + > [!TIP] + >Make sure your Azure IoT EdgeHub Dev Tool version is greater than 0.3.0. You'll need to have a pre-existing IoT Edge device in the Azure portal and have your connection string ready during setup. -* Install the Vcpkg library manager, and then install the **azure-iot-sdk-c package** for Windows. 
+ You may need to restart Visual Studio to complete the installation. + +* Install the **Vcpkg** library manager ```cmd git clone https://github.com/Microsoft/vcpkg @@ -56,6 +69,7 @@ After your Visual Studio 2019 is ready, you also need the following tools and co bootstrap-vcpkg.bat ``` + Install the **azure-iot-sdk-c** package for Windows ```cmd vcpkg.exe install azure-iot-sdk-c:x64-windows vcpkg.exe --triplet x64-windows integrate install @@ -66,73 +80,76 @@ After your Visual Studio 2019 is ready, you also need the following tools and co > [!TIP] > You can use a local Docker registry for prototype and testing purposes instead of a cloud registry. -* To test your module on a device, you'll need an active IoT hub with at least one IoT Edge device. To quickly create an IoT Edge device for testing, follow the steps in the quickstart for [Linux](quickstart-linux.md) or [Windows](quickstart.md). If you are running IoT Edge daemon on your development machine, you might need to stop EdgeHub and EdgeAgent before you start development in Visual Studio. - -### Check your tools version +* To test your module on a device, you'll need an active IoT Hub with at least one IoT Edge device. To create an IoT Edge device for testing you can create one in the Azure portal or with the CLI: -1. From the **Extensions** menu, select **Manage Extensions**. Expand **Installed > Tools** and you can find **Azure IoT Edge Tools for Visual Studio** and **Cloud Explorer for Visual Studio**. + * Creating one in the [Azure portal](https://portal.azure.com/) is the quickest. From the Azure portal, go to your IoT Hub resource. Select **IoT Edge** from the menu on the left and then select **Add IoT Edge Device**. -1. Note the installed version. 
You can compare this version with the latest version on Visual Studio Marketplace ([Cloud Explorer](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.CloudExplorerForVS2019), [Azure IoT Edge](https://marketplace.visualstudio.com/items?itemName=vsc-iot.vs16iotedgetools)) + :::image type="content" source="./media/how-to-visual-studio-develop-module/create-new-iot-edge-device.png" alt-text="Screenshot of how to add a new I o T Edge device"::: + + A new popup called **Create a device** will appear. Add a name to your device (known as the Device ID), then select **Save** in the lower left. + + Finally, confirm that your new device exists in your IoT Hub, from the **Device management > IoT Edge** menu. For more information on creating an IoT Edge device through the Azure portal, read [Create and provision an IoT Edge device on Linux using symmetric keys](how-to-provision-single-device-linux-symmetric.md). -1. If your version is older than what's available on Visual Studio Marketplace, update your tools in Visual Studio as shown in the following section. + * To create an IoT Edge device with the CLI follow the steps in the quickstart for [Linux](quickstart-linux.md#register-an-iot-edge-device) or [Windows](quickstart.md#register-an-iot-edge-device). In the process of registering an IoT Edge device, you create an IoT Edge device. -> [!NOTE] -> If you are using Visual Studio 2022, [Cloud Explorer](/visualstudio/azure/vs-azure-tools-resources-managing-with-cloud-explorer?view=vs-2022&preserve-view=true) is retired. To deploy Azure IoT Edge modules, use [Azure CLI](how-to-deploy-modules-cli.md?view=iotedge-2020-11&preserve-view=true) or [Azure portal](how-to-deploy-modules-portal.md?view=iotedge-2020-11&preserve-view=true). - -### Update your tools - -1. In the **Manage Extensions** window, expand **Updates > Visual Studio Marketplace**, select **Azure IoT Edge Tools** or **Cloud Explorer for Visual Studio** and select **Update**. - -1. 
After the tools update is downloaded, close Visual Studio to trigger the tools update using the VSIX installer. - -1. In the installer, select **OK** to start and then **Modify** to update the tools. - -1. After the update is complete, select **Close** and restart Visual Studio. + If you are running the IoT Edge daemon on your development machine, you might need to stop EdgeHub and EdgeAgent before you start development in Visual Studio. ## Create an Azure IoT Edge project -The IoT Edge project template in Visual Studio creates a solution that can be deployed to IoT Edge devices. First you create an Azure IoT Edge solution, and then you generate the first module in that solution. Each IoT Edge solution can contain more than one module. +The IoT Edge project template in Visual Studio creates a solution that can be deployed to IoT Edge devices. In summary, first you'll create an Azure IoT Edge solution, and then you'll generate the first module in that solution. Each IoT Edge solution can contain more than one module. + +In all, we're going to build three projects in our solution. The main module that contains EdgeAgent and EdgeHub, in addition to the temperature sensor module, then you'll add two more IoT Edge modules. > [!TIP] -> The IoT Edge project structure created by Visual Studio is not the same as in Visual Studio Code. +> The IoT Edge project structure created by Visual Studio is not the same as the one in Visual Studio Code. 1. In Visual Studio, create a new project. -1. On the **Create a new project** page, search for **Azure IoT Edge**. Select the project that matches the platform and architecture for your IoT Edge device, and click **Next**. +1. In the **Create a new project** window, search for **Azure IoT Edge**. Select the project that matches the platform and architecture for your IoT Edge device, and click **Next**. 
:::image type="content" source="./media/how-to-visual-studio-develop-module/create-new-project.png" alt-text="Create New Project"::: -1. On the **Configure your new project** page, enter a name for your project and specify the location, then select **Create**. +1. In the **Configure your new project** window, enter a name for your project and specify the location, then select **Create**. -1. On the **Add Module** window, select the type of module you want to develop. You can also select **Existing module** to add an existing IoT Edge module to your deployment. Specify your module name and module image repository. +1. In the **Add Module** window, select the type of module you want to develop. You can also select **Existing module** to add an existing IoT Edge module to your deployment. Specify your module name and module image repository. - Visual Studio autopopulates the repository URL with **localhost:5000/**. If you use a local Docker registry for testing, then **localhost** is fine. If you use Azure Container Registry, then replace **localhost:5000** with the login server from your registry's settings. The login server looks like **_\_.azurecr.io**.The final result should look like **\<*registry name*\>.azurecr.io/_\_**. + Visual Studio autopopulates the repository URL with **localhost:5000/**. If you use a local Docker registry for testing, then **localhost** is fine. If you use Azure Container Registry, then replace **localhost:5000** with the login server from your registry's settings. + + The login server looks like **_\_.azurecr.io**.The final result should look like **\<*registry name*\>.azurecr.io/_\_**, for example **my-registry-name.azurecr.io/my-module-name**. Select **Add** to add your module to the project. ![Add Application and Module](./media/how-to-visual-studio-develop-csharp-module/add-module.png) + > [!NOTE] + >If you have an existing IoT Edge project, you can still change the repository URL by opening the **module.json** file. 
The repository URL is located in the 'repository' property of the JSON file. + Now you have an IoT Edge project and an IoT Edge module in your Visual Studio solution. -The module folder contains a file for your module code, named either `program.cs` or `main.c` depending on the language you chose. This folder also contains a file named `module.json` that describes the metadata of your module. Various Docker files provide the information needed to build your module as a Windows or Linux container. +#### Project structure + +In your solution is a main project folder and a single module folder. Both are on the project level. The main project folder contains your deployment manifest. -The project folder contains a list of all the modules included in that project. Right now it should show only one module, but you can add more. For more information about adding modules to a project, see the [Build and debug multiple modules](#build-and-debug-multiple-modules) section later in this article. +The module project folder contains a file for your module code named either `program.cs` or `main.c` depending on the language you chose. This folder also contains a file named `module.json` that describes the metadata of your module. Various Docker files included here provide the information needed to build your module as a Windows or Linux container. +#### Deployment manifest of your project -The project folder also contains a file named `deployment.template.json`. This file is a template of an IoT Edge deployment manifest, which defines all the modules that will run on a device along with how they will communicate with each other. For more information about deployment manifests, see [Learn how to deploy modules and establish routes](module-composition.md). If you open this deployment template, you see that the two runtime modules, **edgeAgent** and **edgeHub** are included, along with the custom module that you created in this Visual Studio project. 
A fourth module named **SimulatedTemperatureSensor** is also included. This default module generates simulated data that you can use to test your modules, or delete if it's not necessary. To see how the simulated temperature sensor works, view the [SimulatedTemperatureSensor.csproj source code](https://github.com/Azure/iotedge/tree/master/edge-modules/SimulatedTemperatureSensor). +The deployment manifest you'll edit is called `deployment.debug.template.json`. This file is a template of an IoT Edge deployment manifest, which defines all the modules that run on a device along with how they communicate with each other. For more information about deployment manifests, see [Learn how to deploy modules and establish routes](module-composition.md). + +If you open this deployment template, you see that the two runtime modules, **edgeAgent** and **edgeHub** are included, along with the custom module that you created in this Visual Studio project. A fourth module named **SimulatedTemperatureSensor** is also included. This default module generates simulated data that you can use to test your modules, or delete if it's not necessary. To see how the simulated temperature sensor works, view the [SimulatedTemperatureSensor.csproj source code](https://github.com/Azure/iotedge/tree/master/edge-modules/SimulatedTemperatureSensor). ### Set IoT Edge runtime version The IoT Edge extension defaults to the latest stable version of the IoT Edge runtime when it creates your deployment assets. Currently, the latest stable version is version 1.2. If you're developing modules for devices running the 1.1 long-term support version or the earlier 1.0 version, update the IoT Edge runtime version in Visual Studio to match. -1. In the Solution Explorer, right-click the name of your project and select **Set IoT Edge runtime version**. +1. In the Solution Explorer, right-click the name of your main project and select **Set IoT Edge runtime version**. 
- :::image type="content" source="./media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png" alt-text="Right-click your project name and select set IoT Edge runtime version."::: + :::image type="content" source="./media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png" alt-text="Screenshot of how to find and select the menu item named 'Set I o T Edge Runtime version'."::: -1. Use the drop-down menu to choose the runtime version that your IoT Edge devices are running, then select **OK** to save your changes. +1. Use the drop-down menu to choose the runtime version that your IoT Edge devices are running, then select **OK** to save your changes. If no change was made, select **Cancel** to exit. -1. Re-generate your deployment manifest with the new runtime version. Right-click the name of your project and select **Generate deployment for IoT Edge**. +1. If you changed the version, re-generate your deployment manifest by right-clicking the name of your project and select **Generate deployment for IoT Edge**. This will generate a deployment manifest based on your deployment template and will appear in the **config** folder of your Visual Studio project. -## Develop your module +## Module infrastructure & development options When you add a new module, it comes with default code that is ready to be built and deployed to a device so that you can start testing without touching any code. The module code is located within the module folder in a file named `Program.cs` (for C#) or `main.c` (for C). @@ -142,15 +159,21 @@ When you're ready to customize the module template with your own code, use the [ ## Set up the iotedgehubdev testing tool -The IoT edgeHub dev tool provides a local development and debug experience. The tool helps start IoT Edge modules without the IoT Edge runtime so that you can create, develop, test, run, and debug IoT Edge modules and solutions locally. 
You don't have to push images to a container registry and deploy them to a device for testing. +The Azure IoT EdgeHub Dev Tool provides a local development and debug experience. The tool helps start IoT Edge modules without the IoT Edge runtime so that you can create, develop, test, run, and debug IoT Edge modules and solutions locally. You don't have to push images to a container registry and deploy them to a device for testing. For more information, see [Azure IoT EdgeHub Dev Tool](https://pypi.org/project/iotedgehubdev/). -To initialize the tool, provide an IoT Edge device connection string from IoT Hub. +To initialize the tool in Visual Studio: -1. Retrieve the connection string of an IoT Edge device from the Azure portal, the Azure CLI, or the Visual Studio Cloud Explorer. +1. Retrieve the connection string of your IoT Edge device (found in your IoT Hub) from the [Azure portal](https://portal.azure.com/) or from the Azure CLI. -1. From the **Tools** menu, select **Azure IoT Edge Tools** > **Setup IoT Edge Simulator**. + If using the CLI to retrieve your connection string, use this command, replacing "**[device_id]**" and "**[hub_name]**" with your own values: + + ```azurecli + az iot hub device-identity connection-string show --device-id [device_id] --hub-name [hub_name] + ``` + +1. From the **Tools** menu in Visual Studio, select **Azure IoT Edge Tools** > **Setup IoT Edge Simulator**. 1. Paste the connection string and click **OK**. @@ -162,17 +185,19 @@ To initialize the tool, provide an IoT Edge device connection string from IoT Hu Typically, you'll want to test and debug each module before running it within an entire solution with multiple modules. >[!TIP] ->Make sure you have switched over to the correct Docker container mode, either Linux container mode or Windows container mode, depending on the type of IoT Edge module you are developing. From the Docker Desktop menu, you can toggle between the two types of modes. 
Select **Switch to Windows containers** to use Windows containers, or select **Switch to Linux containers** to use Linux containers. +>Depending on the type of IoT Edge module you are developing, you may need to enable the correct Docker container mode: either Linux or Windows. From the Docker Desktop menu, you can toggle between the two types of modes. Select **Switch to Windows containers** or select **Switch to Linux containers**. For this tutorial, we use Linux. +> +>:::image type="content" source="./media/how-to-visual-studio-develop-module/system-tray.png" alt-text="Screenshot of how to find and select the menu item named 'Switch to Windows containers'."::: -1. In **Solution Explorer**, right-click the module folder and select **Set as StartUp Project** from the menu. +1. In **Solution Explorer**, right-click the module project folder and select **Set as StartUp Project** from the menu. - ![Set Start-up Project](./media/how-to-visual-studio-develop-csharp-module/module-start-up-project.png) + :::image type="content" source="./media/how-to-visual-studio-develop-module/module-start-up-project.png" alt-text="Screenshot of how to set project as startup project."::: -1. Press **F5** or click the run button in the toolbar to run the module. It may take 10–20 seconds the first time you do so. +1. Press **F5** or click the run button in the toolbar to run the module. It may take 10–20 seconds the first time you do so. Be sure you don't have other Docker containers running that might bind the port you need for this project. - ![Run Module](./media/how-to-visual-studio-develop-csharp-module/run-module.png) + :::image type="content" source="./media/how-to-visual-studio-develop-module/run-module.png" alt-text="Screenshot of how to run a module."::: -1. You should see a .NET Core console app start if the module has been initialized successfully. +1. You should see a .NET Core console app window appear if the module has been initialized successfully. 1. 
Set a breakpoint to inspect the module. @@ -185,9 +210,18 @@ Typically, you'll want to test and debug each module before running it within an curl --header "Content-Type: application/json" --request POST --data '{"inputName": "input1","data":"hello world"}' http://localhost:53000/api/v1/messages ``` - ![Debug Single Module](./media/how-to-visual-studio-develop-csharp-module/debug-single-module.png) + :::image type="content" source="./media/how-to-visual-studio-develop-csharp-module/debug-single-module.png" alt-text="Screenshot of the output console, Visual Studio project, and Bash window." lightbox="./media/how-to-visual-studio-develop-csharp-module/debug-single-module.png"::: + + The breakpoint should be triggered. You can watch variables in the Visual Studio **Locals** window, found when the debugger is running. Go to Debug > Windows > Locals. - The breakpoint should be triggered. You can watch variables in the Visual Studio **Locals** window. + In your Bash or shell, you should see a `{"message":"accepted"}` confirmation. + + In your .NET console you should see: + + ```dotnetcli + IoT Hub module client initialized. + Received message: 1, Body: [hello world] + ``` > [!TIP] > You can also use [PostMan](https://www.getpostman.com/) or other API tools to send messages instead of `curl`. @@ -198,38 +232,56 @@ Typically, you'll want to test and debug each module before running it within an After you're done developing a single module, you might want to run and debug an entire solution with multiple modules. -1. In **Solution Explorer**, add a second module to the solution by right-clicking the project folder. On the menu, select **Add** > **New IoT Edge Module**. +1. In **Solution Explorer**, add a second module to the solution by right-clicking the main project folder. On the menu, select **Add** > **New IoT Edge Module**. 
+ + :::image type="content" source="./media/how-to-visual-studio-develop-module/add-new-module.png" alt-text="Screenshot of how to add a 'New I o T Edge Module' from the menu." lightbox="./media/how-to-visual-studio-develop-module/add-new-module.png"::: - ![Add a new module to an existing IoT Edge project](./media/how-to-visual-studio-develop-csharp-module/add-new-module.png) +1. In the `Add module` window give your new module a name and replace the `localhost:5000` portion of the repository URL with your Azure Container Registry login server, like you did before. -1. Open the file `deployment.template.json` and you'll see that the new module has been added in the **modules** section. A new route was also added to the **routes** section to send messages from the new module to IoT Hub. If you want to send data from the simulated temperature sensor to the new module, add another route like the following example: +1. Open the file `deployment.debug.template.json` to see that the new module has been added in the **modules** section. A new route was also added to the **routes** section in `EdgeHub` to send messages from the new module to IoT Hub. To send data from the simulated temperature sensor to the new module, add another route with the following line of `JSON`. Replace `` (in two places) with your own module name. ```json "sensorTo": "FROM /messages/modules/SimulatedTemperatureSensor/outputs/temperatureOutput INTO BrokeredEndpoint(\"/modules//inputs/input1\")" ``` -1. Right-click the project folder and select **Set as StartUp Project** from the context menu. +1. Right-click the main project (for example, `IoTEdgeProject`) and select **Set as StartUp Project**. -1. Create your breakpoints and then press **F5** to run and debug multiple modules simultaneously. You should see multiple .NET Core console app windows, which each window representing a different module. +1. 
Create breakpoints in each module and then press **F5** to run and debug multiple modules simultaneously. You should see multiple .NET Core console app windows, with each window representing a different module. - ![Debug Multiple Modules](./media/how-to-visual-studio-develop-csharp-module/debug-multiple-modules.png) + :::image type="content" source="./media/how-to-visual-studio-develop-csharp-module/debug-multiple-modules.png" alt-text="Screenshot of Visual Studio with two output consoles."::: 1. Press **Ctrl + F5** or select the stop button to stop debugging. ## Build and push images -1. Make sure the IoT Edge project is the start-up project, not one of the individual modules. Select either **Debug** or **Release** as the configuration to build for your module images. +1. Make sure the main IoT Edge project is the start-up project, not one of the individual modules. Select either **Debug** or **Release** as the configuration to build for your module images. > [!NOTE] > When choosing **Debug**, Visual Studio uses `Dockerfile.(amd64|windows-amd64).debug` to build Docker images. This includes the .NET Core command-line debugger VSDBG in your container image while building it. For production-ready IoT Edge modules, we recommend that you use the **Release** configuration, which uses `Dockerfile.(amd64|windows-amd64)` without VSDBG. -1. If you're using a private registry like Azure Container Registry (ACR), use the following Docker command to sign in to it. You can get the username and password from the **Access keys** page of your registry in the Azure portal. If you're using local registry, you can [run a local registry](https://docs.docker.com/registry/deploying/#run-a-local-registry). +1. If you're using a private registry like Azure Container Registry (ACR), use the following Docker command to sign in to it. You can get the username and password from the **Access keys** page of your registry in the Azure portal. ```cmd docker login -u -p ``` -1. 
If you're using a private registry like Azure Container Registry, you need to add your registry login information to the runtime settings found in the file `deployment.template.json`. Replace the placeholders with your actual ACR admin username, password, and registry name. +1. Let's add the Azure Container Registry login information to the runtime settings found in the file `deployment.debug.template.json`. There are two ways to do this. You can either add your registry credentials to your `.env` file (most secure) or add them directly to your `deployment.debug.template.json` file. + + **Add credentials to your `.env` file:** + + In the Solution Explorer, click the button that will **Show All Files**. The `.env` file will appear. Add your Azure Container Registry username and password to your `.env` file. These credentials can be found on the **Access Keys** page of your Azure Container Registry in the Azure portal. + + :::image type="content" source="./media/how-to-visual-studio-develop-module/show-env-file.png" alt-text="Screenshot of button that will show all files in the Solution Explorer."::: + + ```env + DEFAULT_RT_IMAGE=1.2 + CONTAINER_REGISTRY_USERNAME_myregistry= + CONTAINER_REGISTRY_PASSWORD_myregistry= + ``` + + **Add credentials directly to `deployment.debug.template.json`:** + + If you'd rather add your credentials directly to your deployment template, replace the placeholders with your actual ACR admin username, password, and registry name. ```json "settings": { @@ -248,25 +300,87 @@ After you're done developing a single module, you might want to run and debug an >[!NOTE] >This article uses admin login credentials for Azure Container Registry, which are convenient for development and test scenarios. When you're ready for production scenarios, we recommend a least-privilege authentication option like service principals. 
For more information, see [Manage access to your container registry](production-checklist.md#manage-access-to-your-container-registry). -1. In **Solution Explorer**, right-click the project folder and select **Build and Push IoT Edge Modules** to build and push the Docker image for each module. +1. If you're using a local registry, you can [run a local registry](https://docs.docker.com/registry/deploying/#run-a-local-registry). + +1. Finally, in the **Solution Explorer**, right-click the main project folder and select **Build and Push IoT Edge Modules** to build and push the Docker image for each module. This might take a minute. When you see `Finished Build and Push IoT Edge Modules.` in your Output console of Visual Studio, you are done. ## Deploy the solution -In the quickstart article that you used to set up your IoT Edge device, you deployed a module by using the Azure portal. You can also deploy modules using the Cloud Explorer for Visual Studio. You already have a deployment manifest prepared for your scenario, the `deployment.json` file and all you need to do is select a device to receive the deployment. +In the quickstart article that you used to set up your IoT Edge device, you deployed a module by using the Azure portal. You can also deploy modules using the CLI in Visual Studio. You already have a deployment manifest template you've been observing throughout this tutorial. Let's generate a deployment manifest from that, then use an Azure CLI command to deploy your modules to your IoT Edge device in Azure. -1. Open **Cloud Explorer** by clicking **View** > **Cloud Explorer**. Make sure you've logged in to Visual Studio 2019. +1. Right-click on your main project in Visual Studio Solution Explorer and choose **Generate Deployment for IoT Edge**. -1. In **Cloud Explorer**, expand your subscription, find your Azure IoT Hub and the Azure IoT Edge device you want to deploy. 
+ :::image type="content" source="./media/how-to-visual-studio-develop-module/generate-deployment.png" alt-text="Screenshot of location of the 'generate deployment' menu item."::: -1. Right-click on the IoT Edge device to create a deployment for it. Navigate to the deployment manifest configured for your platform located in the **config** folder in your Visual Studio solution, such as `deployment.arm32v7.json`. +1. Go to your local Visual Studio main project folder and look in the `config` folder. The file path might look like this: `C:\Users\\source\repos\\config`. Here you'll find the generated deployment manifest such as `deployment.amd64.debug.json`. -1. Click the refresh button to see the new modules running along with the **SimulatedTemperatureSensor** module and **$edgeAgent** and **$edgeHub**. +1. Check your `deployment.amd64.debug.json` file to confirm the `edgeHub` schema version is set to 1.2. -## View generated data + ```json + "$edgeHub": { + "properties.desired": { + "schemaVersion": "1.2", + "routes": { + "IotEdgeModule2022ToIoTHub": "FROM /messages/modules/IotEdgeModule2022/outputs/* INTO $upstream", + "sensorToIotEdgeModule2022": "FROM /messages/modules/SimulatedTemperatureSensor/outputs/temperatureOutput INTO BrokeredEndpoint(\"/modules/IotEdgeModule2022/inputs/input1\")", + "IotEdgeModule2022bToIoTHub": "FROM /messages/modules/IotEdgeModule2022b/outputs/* INTO $upstream" + }, + "storeAndForwardConfiguration": { + "timeToLiveSecs": 7200 + } + } + } + ``` + > [!TIP] + > The deployment template for Visual Studio 2022 requires the 1.2 schema version. If you need it to be 1.1 or 1.0, wait until after the deployment is generated (do not change it in `deployment.debug.template.json`). Generating a deployment will create a 1.2 schema by default. However, you can manually change `deployment.amd64.debug.json`, the generated manifest, if needed before deploying it to Azure. 
+ + > [!IMPORTANT] + > Once your IoT Edge device is deployed, it currently won't display correctly in the Azure portal with schema version 1.2 (version 1.1 will be fine). This is a known bug and will be fixed soon. However, this won't affect your device, as it's still connected in IoT Hub and can be communicated with at any time using the Azure CLI. + > + >:::image type="content" source="./media/how-to-publish-subscribe/unsupported-1.2-schema.png" alt-text="Screenshot of Azure portal error on the I o T Edge device page."::: + +1. Now let's deploy our manifest with an Azure CLI command. Open the Visual Studio **Developer Command Prompt** and change to the **config** directory. + + ```cmd + cd config + ``` + +1. From your **config** folder, execute the following deployment command. Replace the `[device id]`, `[hub name]`, and `[file path]` with your values. -1. To monitor the D2C message for a specific IoT Edge device, select it in your IoT hub in **Cloud Explorer** and then click **Start Monitoring Built-in Event Endpoint** in the **Action** window. + ```cmd + az iot edge set-modules --device-id [device id] --hub-name [hub name] --content [file path] + ``` + + For example, your command might look like this: + + ```cmd + az iot edge set-modules --device-id my-device-name --hub-name my-iot-hub-name --content deployment.amd64.debug.json + ``` + +1. After running the command, you'll see a confirmation of deployment printed in `JSON` in your command prompt. + +### Confirm the deployment to your device + +To check that your IoT Edge modules were deployed to Azure, sign in to your device (or virtual machine), for example through SSH or Azure Bastion, and run the IoT Edge list command. + +```azurecli + iotedge list +``` + +You should see a list of your modules running on your device or virtual machine. 
+ +```azurecli + NAME STATUS DESCRIPTION CONFIG + SimulatedTemperatureSensor running Up a day mcr.microsoft.com/azureiotedge-simulated-temperature-sensor:1.0 + edgeAgent running Up a day mcr.microsoft.com/azureiotedge-agent:1.2 + edgeHub running Up a day mcr.microsoft.com/azureiotedge-hub:1.2 + myIotEdgeModule running Up 2 hours myregistry.azurecr.io/myiotedgemodule:0.0.1-amd64.debug + myIotEdgeModule2 running Up 2 hours myregistry.azurecr.io/myiotedgemodule2:0.0.1-amd64.debug +``` + +## View generated data -1. To stop monitoring data, select **Stop Monitoring Built-in Event Endpoint** in the **Action** window. +To monitor the device-to-cloud (D2C) messages for a specific IoT Edge device, review the [Tutorial: Monitor IoT Edge devices](tutorial-monitor-with-workbooks.md) to get started. ## Next steps diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/add-new-module.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/add-new-module.png new file mode 100644 index 000000000000..2e20d502d754 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/add-new-module.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-iot-edge-device.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-iot-edge-device.png new file mode 100644 index 000000000000..6f2076269342 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-iot-edge-device.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-project.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-project.png index 0334ec57dea4..b778c610a3e9 100644 Binary files a/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-project.png and b/articles/iot-edge/media/how-to-visual-studio-develop-module/create-new-project.png differ diff --git 
a/articles/iot-edge/media/how-to-visual-studio-develop-module/device-created-confirm.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/device-created-confirm.png new file mode 100644 index 000000000000..95b7e3ebdf76 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/device-created-confirm.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/generate-deployment.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/generate-deployment.png new file mode 100644 index 000000000000..5c168f0ab0c8 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/generate-deployment.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/get-dev-command-prompt.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/get-dev-command-prompt.png new file mode 100644 index 000000000000..0dc520913093 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/get-dev-command-prompt.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/module-start-up-project.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/module-start-up-project.png new file mode 100644 index 000000000000..673d52f42193 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/module-start-up-project.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/run-module.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/run-module.png new file mode 100644 index 000000000000..a81cdc5c1993 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/run-module.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png index 
a0c6636b72a2..2d889e48e765 100644 Binary files a/articles/iot-edge/media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png and b/articles/iot-edge/media/how-to-visual-studio-develop-module/set-iot-edge-runtime-version.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/show-env-file.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/show-env-file.png new file mode 100644 index 000000000000..13c754486563 Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/show-env-file.png differ diff --git a/articles/iot-edge/media/how-to-visual-studio-develop-module/system-tray.png b/articles/iot-edge/media/how-to-visual-studio-develop-module/system-tray.png new file mode 100644 index 000000000000..d699fa32a8af Binary files /dev/null and b/articles/iot-edge/media/how-to-visual-studio-develop-module/system-tray.png differ diff --git a/articles/iot-hub/tutorial-routing.md b/articles/iot-hub/tutorial-routing.md index 956e34cb7200..ba1a7ce883c3 100644 --- a/articles/iot-hub/tutorial-routing.md +++ b/articles/iot-hub/tutorial-routing.md @@ -388,7 +388,7 @@ If you want to remove all of the Azure resources you used for this tutorial, del 1. Review all the resources that are in the resource group to determine which ones you want to clean up. - * If you want to delete all the resources, use the [az group delete](/cli/azure/groupt#az-group-delete) command. + * If you want to delete all the resources, use the [az group delete](/cli/azure/group#az-group-delete) command. 
```azurecli-interactive az group delete --name $resourceGroup diff --git a/articles/lab-services/TOC.yml b/articles/lab-services/TOC.yml index 900777aaae05..41c55425fd18 100644 --- a/articles/lab-services/TOC.yml +++ b/articles/lab-services/TOC.yml @@ -237,6 +237,12 @@ href: how-to-access-vm-for-students-within-teams.md - name: Earlier releases items: + - name: Labs architecture fundamentals + href: classroom-labs-fundamentals-1.md + - name: Administrator guide + href: administrator-guide-1.md + - name: Lab account creation guide + href: account-setup-guide.md - name: Create & configure lab accounts items: - name: Create and manage labs @@ -259,10 +265,6 @@ href: how-to-add-user-lab-owner.md - name: Manage labs in a lab account href: manage-labs-1.md - - name: Administrator guide - href: administrator-guide-1.md - - name: Lab account creation guide - href: account-setup-guide.md - name: Az.LabServices PowerShell module for lab accounts href: reference-powershell-module.md - name: Reference diff --git a/articles/lab-services/classroom-labs-fundamentals-1.md b/articles/lab-services/classroom-labs-fundamentals-1.md new file mode 100644 index 000000000000..8fc83e676d22 --- /dev/null +++ b/articles/lab-services/classroom-labs-fundamentals-1.md @@ -0,0 +1,47 @@ +--- +title: Architecture fundamentals with lab accounts in Azure Lab Services | Microsoft Docs +description: This article will cover the fundamental resources used by Lab Services and basic architecture of a lab that uses lab accounts. +author: emaher +ms.topic: overview +ms.date: 05/30/2022 +ms.service: lab-services +ms.author: enewman +--- + +# Architecture Fundamentals in Azure Lab Services when using lab accounts + +[!INCLUDE [preview note](./includes/lab-services-new-update-note.md)] + +Azure Lab Services is a SaaS (software as a service) solution, which means that the resources needed by Lab Services are handled for you. 
This article will cover the fundamental resources used by Lab Services and basic architecture of a lab. + +Azure Lab Services does provide a couple of areas that allow you to use your own resources with Lab Services. For more information about using VMs on your own network, see how to [peer a virtual network](how-to-connect-peer-virtual-network.md). To reuse images from an Azure Compute Gallery, see how to [attach a compute gallery](how-to-attach-detach-shared-image-gallery.md). + +Below is the basic architecture of a lab. The lab account is hosted in your subscription. The student VMs, along with the resources needed to support the VMs are hosted in a subscription owned by Azure Lab Services. Let’s talk about what is in Azure Lab Service's subscriptions in more detail. + +:::image type="content" source="./media/classroom-labs-fundamentals-1/labservices-basic-architecture.png" alt-text="Architecture diagram of labs using lab accounts in Azure Lab Services."::: + +## Hosted Resources + +The resources required to run a lab are hosted in one of the Microsoft-managed Azure subscriptions. Resources include: + +- template virtual machine for the educator +- virtual machine for each student +- network-related items such as a load balancer, virtual network, and network security group. + +These subscriptions are monitored for suspicious activity. It's important to note that this monitoring is done externally to the virtual machines through VM extension or network pattern monitoring. If [shutdown on disconnect](how-to-enable-shutdown-disconnect.md) is enabled, a diagnostic extension is enabled on the virtual machine. The extension allows Lab Services to be informed of the remote desktop protocol (RDP) session disconnect event. + +## Virtual Network + +Each lab is isolated by its own virtual network. If the lab has a [peered virtual network](how-to-connect-peer-virtual-network.md), then each lab is isolated by its own subnet. 
Students connect to their virtual machine through a load balancer. No student virtual machines have a public IP address; they only have a private IP address. The connection string for the student will be the public IP address of the load balancer and a random port between 49152 and 65535. Inbound rules on the load balancer forward the connection, depending on the operating system, to either port 22 (SSH) or port 3389 (RDP) of the appropriate virtual machine. An NSG prevents outside traffic on any other ports. + +## Access control to the virtual machines + +Lab Services handles the student’s ability to perform actions like start and stop on their virtual machines. It also controls access to their VM connection information. + +Lab Services also handles the registration of students to the service. There are currently two different access settings: restricted and nonrestricted. For more information, see the [manage lab users](how-to-configure-student-usage.md#send-invitations-to-users) article. Restricted access means Lab Services verifies that the students are added as users before allowing access. Nonrestricted means any user can register as long as they have the registration link and there's capacity in the lab. Nonrestricted can be useful for hackathon events. + +Student VMs that are hosted in the lab have a username and password set by the creator of the lab. Alternatively, the creator of the lab can allow registered students to choose their own password on first sign-in. + +## Next steps + +To learn more about features available in Lab Services, see [Azure Lab Services concepts](classroom-labs-concepts.md) and [Azure Lab Services overview](lab-services-overview.md). 
diff --git a/articles/lab-services/classroom-labs-fundamentals.md b/articles/lab-services/classroom-labs-fundamentals.md index bb95f5f0bee7..e2d400540cac 100644 --- a/articles/lab-services/classroom-labs-fundamentals.md +++ b/articles/lab-services/classroom-labs-fundamentals.md @@ -3,38 +3,49 @@ title: Architecture Fundamentals in Azure Lab Services | Microsoft Docs description: This article will cover the fundamental resources used by Lab Services and basic architecture of a lab. author: emaher ms.topic: overview -ms.date: 11/19/2021 +ms.date: 05/30/2022 ms.author: enewman +ms.service: lab-services --- # Architecture Fundamentals in Azure Lab Services -[!INCLUDE [preview note](./includes/lab-services-new-update-note.md)] +[!INCLUDE [preview note](./includes/lab-services-new-update-focused-article.md)] Azure Lab Services is a SaaS (software as a service) solution, which means that the resources needed by Lab Services are handled for you. This article will cover the fundamental resources used by Lab Services and basic architecture of a lab. -Azure Lab Services does provide a couple of areas that allow you to use your own resources in conjunction with Lab Services. For more information about using VMs on your own network, see how to [peer a virtual network](how-to-connect-peer-virtual-network.md). If using the April 2022 Update, see [Connect to your virtual network in Azure Lab Services](how-to-connect-vnet-injection.md) to use virtual network injection instead of virtual network peering. To reuse images from an Azure Compute Gallery, see how to [attach a compute gallery](how-to-attach-detach-shared-image-gallery.md). +Azure Lab Services does provide a couple of areas that allow you to use your own resources with Lab Services. For more information about using VMs on your own network, see [Connect to your virtual network in Azure Lab Services](how-to-connect-vnet-injection.md) to use virtual network injection instead of virtual network peering. 
To reuse images from an Azure Compute Gallery, see how to [attach a compute gallery](how-to-attach-detach-shared-image-gallery.md). -Below is the basic architecture of a lab. The lab account or lab plan is hosted in your subscription. The student VMs, along with the resources needed to support the VMs are hosted in a subscription owned by Azure Lab Services. Let’s talk about what is in Azure Lab Service's subscriptions in more detail. +Below is the basic architecture of a lab. The lab plan is hosted in your subscription. The student VMs, along with the resources needed to support the VMs are hosted in a subscription owned by Azure Lab Services. Let’s talk about what is in Azure Lab Service's subscriptions in more detail. -![labs basic architecture](./media/classroom-labs-fundamentals/labservices-basic-architecture.png) +:::image type="content" source="./media/classroom-labs-fundamentals/labservices-basic-architecture.png" alt-text="Architecture diagram of basic lab in Azure Lab Services."::: ## Hosted Resources -The resources required to run a lab are hosted in one of the Microsoft-managed Azure subscriptions. Resources include a template virtual machine for the educator, virtual machine for each student, and network-related items such as a load balancer, virtual network, and network security group. These subscriptions are monitored for suspicious activity. It is important to note that this monitoring is done externally to the virtual machines through VM extension or network pattern monitoring. If [shutdown on disconnect](how-to-enable-shutdown-disconnect.md) is enabled, a diagnostic extension is enabled on the virtual machine. The extension allows Lab Services to be informed of the remote desktop protocol (RDP) session disconnect event. +The resources required to run a lab are hosted in one of the Microsoft-managed Azure subscriptions. 
Resources include: + +- template virtual machine for the educator +- virtual machine for each student +- network-related items such as a load balancer, virtual network, and network security group + +These subscriptions are monitored for suspicious activity. It's important to note that this monitoring is done externally to the virtual machines through VM extension or network pattern monitoring. If [shutdown on disconnect](how-to-enable-shutdown-disconnect.md) is enabled, a diagnostic extension is enabled on the virtual machine. The extension allows Lab Services to be informed of the remote desktop protocol (RDP) session disconnect event. ## Virtual Network -> [!NOTE] -> For the latest experience in Azure Lab Services using your virtual network, see [Connect to your virtual network](how-to-connect-vnet-injection.md). This experience replaces the peer virtual network experience. +Each lab is isolated by its own virtual network. If the lab is using [advanced networking](how-to-connect-vnet-injection.md), then each lab uses the same subnet that has been delegated to Azure Lab Services and connected to the lab plan. + +Students connect to their virtual machine through a load balancer. No student virtual machines have a public IP address; they only have a private IP address. The connection string for the student will be the public IP address of the load balancer and a random port between: + +- 4980-4989 and 5000-6999 for SSH connections +- 4990-4999 and 7000-8999 for RDP connections -Each lab is isolated by its own virtual network. If the lab has a [peered virtual network](how-to-connect-peer-virtual-network.md), then each lab is isolated by its own subnet. Students connect to their virtual machine through a load balancer. No student virtual machines have a public IP address; they only have a private ip address. The connection string for the student will be the public IP address of the load balancer and a random port between 49152 and 65535. 
Inbound rules on the load balancer forward the connection, depending on the operating system, to either port 22 (SSH) or port 3389 (RDP) of the appropriate virtual machine. An NSG prevents outside traffic on any other ports. +Inbound rules on the load balancer forward the connection, depending on the operating system, to either port 22 (SSH) or port 3389 (RDP) of the appropriate virtual machine. An NSG prevents outside traffic on any other ports. ## Access control to the virtual machines Lab Services handles the student’s ability to perform actions like start and stop on their virtual machines. It also controls access to their VM connection information. -Lab Services also handles the registration of students to the service. There are currently two different access settings: restricted and nonrestricted. For more information, see the [manage lab users](how-to-configure-student-usage.md#send-invitations-to-users) article. Restricted access means Lab Services verifies that the students are added as user before allowing access. Nonrestricted means any user can register as long as they have the registration link and there is capacity in the lab. Nonrestricted can be useful for hackathon events. +Lab Services also handles the registration of students to the service. There are currently two different access settings: restricted and nonrestricted. For more information, see the [manage lab users](how-to-configure-student-usage.md#send-invitations-to-users) article. Restricted access means Lab Services verifies that the students are added as user before allowing access. Nonrestricted means any user can register as long as they have the registration link and there's capacity in the lab. Nonrestricted can be useful for hackathon events. Student VMs that are hosted in the lab have a username and password set by the creator of the lab. Alternately, the creator of the lab can allow registered students to choose their own password on first sign-in. 
diff --git a/articles/lab-services/how-to-configure-firewall-settings.md b/articles/lab-services/how-to-configure-firewall-settings.md index 1e022433536f..4527d34cb96f 100644 --- a/articles/lab-services/how-to-configure-firewall-settings.md +++ b/articles/lab-services/how-to-configure-firewall-settings.md @@ -11,7 +11,7 @@ ms.topic: how-to Each organization or school will configure their own network in a way that best fits their needs. Sometimes that includes setting firewall rules that block Remote Desktop Protocol (RDP) or Secure Shell (SSH) connections to machines outside their own network. Because Azure Lab Services runs in the public cloud, some extra configuration maybe needed to allow students to access their VM when connecting from the campus network. -Each lab uses single public IP address and multiple ports. All VMs, both the template VM and student VMs, will use this public IP address. The public IP address won’t change for the life of lab. Each VM will have a different port number. The port numbers range is 49152 - 65535. The combination of public IP address and port number is used to connect educators and students to the correct VM. This article will cover how to find the specific public IP address used by a lab. That information can be used to update inbound and outbound firewall rules so students can access their VMs. +Each lab uses single public IP address and multiple ports. All VMs, both the template VM and student VMs, will use this public IP address. The public IP address won’t change for the life of lab. Each VM will have a different port number. The port numbers range is 49152 - 65535. If using the April 2022 Update (preview), the port ranges for SSH connections are 4980-4989 and 5000-6999. The port ranges for RDP connections are 4990-4999 and 7000-8999. The combination of public IP address and port number is used to connect educators and students to the correct VM. 
This article will cover how to find the specific public IP address used by a lab. That information can be used to update inbound and outbound firewall rules so students can access their VMs. >[!IMPORTANT] >Each lab will have a different public IP address. @@ -30,7 +30,7 @@ The public IP addresses for each lab are listed in the **All labs** page of the ## Conclusion -Now we know the public IP address for the lab. Inbound and outbound rules can be created for the organization's firewall for the public ip address and the port range 49152 - 65535. Once the rules are updated, students can access their VMs without the network firewall blocking access. +Now we know the public IP address for the lab. Inbound and outbound rules can be created for the organization's firewall for the public IP address and the port range 49152 - 65535. Once the rules are updated, students can access their VMs without the network firewall blocking access. ## Next steps diff --git a/articles/lab-services/how-to-enable-nested-virtualization-template-vm-using-script.md b/articles/lab-services/how-to-enable-nested-virtualization-template-vm-using-script.md index b58621672f70..3c32fb6dc2b4 100644 --- a/articles/lab-services/how-to-enable-nested-virtualization-template-vm-using-script.md +++ b/articles/lab-services/how-to-enable-nested-virtualization-template-vm-using-script.md @@ -9,7 +9,7 @@ ms.date: 06/26/2020 Nested virtualization enables you to create a multi-VM environment inside a lab's template virtual machine. Publishing the template will provide each user in the lab with a virtual machine set up with multiple VMs within it. For more information about nested virtualization and Azure Lab Services, see [Enable nested virtualization on a template virtual machine in Azure Lab Services](how-to-enable-nested-virtualization-template-vm.md). -The steps in this article focus on setting up nested virtualization for Windows Server 2016, Windows Server 2019, or Windows 10. 
You will use a script to set up template machine with Hyper-V. The following steps will guide you through how to use the [Lab Services Hyper-V scripts](https://github.com/Azure/azure-devtestlab/tree/master/samples/ClassroomLabs/Scripts/HyperV). +The steps in this article focus on setting up nested virtualization for Windows Server 2016, Windows Server 2019, or Windows 10. You will use a script to set up template machine with Hyper-V. The following steps will guide you through how to use the [Lab Services Hyper-V scripts](https://github.com/Azure/LabServices/tree/main/General_Scripts/PowerShell/HyperV). >[!IMPORTANT] >Select **Large (nested virtualization)** or **Medium (nested virtualization)** for the virtual machine size when creating the lab. Nested virtualization will not work otherwise. diff --git a/articles/lab-services/media/classroom-labs-fundamentals-1/labservices-basic-architecture.png b/articles/lab-services/media/classroom-labs-fundamentals-1/labservices-basic-architecture.png new file mode 100644 index 000000000000..1509eec10102 Binary files /dev/null and b/articles/lab-services/media/classroom-labs-fundamentals-1/labservices-basic-architecture.png differ diff --git a/articles/lab-services/media/classroom-labs-fundamentals/labservices-basic-architecture.png b/articles/lab-services/media/classroom-labs-fundamentals/labservices-basic-architecture.png index 1509eec10102..6d1d18c65f8e 100644 Binary files a/articles/lab-services/media/classroom-labs-fundamentals/labservices-basic-architecture.png and b/articles/lab-services/media/classroom-labs-fundamentals/labservices-basic-architecture.png differ diff --git a/articles/load-testing/media/tutorial-cicd-github-actions/commit-workflow.png b/articles/load-testing/media/tutorial-cicd-github-actions/commit-workflow.png deleted file mode 100644 index 7ebc4ad74081..000000000000 Binary files a/articles/load-testing/media/tutorial-cicd-github-actions/commit-workflow.png and /dev/null differ diff --git 
a/articles/load-testing/media/tutorial-cicd-github-actions/github-actions-workflow-completed.png b/articles/load-testing/media/tutorial-cicd-github-actions/github-actions-workflow-completed.png index 55728ec0f7f8..16c4280f701e 100644 Binary files a/articles/load-testing/media/tutorial-cicd-github-actions/github-actions-workflow-completed.png and b/articles/load-testing/media/tutorial-cicd-github-actions/github-actions-workflow-completed.png differ diff --git a/articles/load-testing/quickstart-create-and-run-load-test.md b/articles/load-testing/quickstart-create-and-run-load-test.md index 99d67cf9fc79..8fe27326574b 100644 --- a/articles/load-testing/quickstart-create-and-run-load-test.md +++ b/articles/load-testing/quickstart-create-and-run-load-test.md @@ -49,7 +49,7 @@ Azure Load Testing enables you to quickly create a load test from the Azure port 1. On the **Quickstart test** page, enter the **Test URL**. - Enter the complete URL that you would like to run the test for. For example, https://www.example.com/login. + Enter the complete URL that you would like to run the test for. For example, `https://www.example.com/login`. 1. (Optional) Update the **Number of virtual users** to the total number of virtual users. @@ -112,4 +112,4 @@ You now have an Azure Load Testing resource, which you used to load test an exte You can reuse this resource to learn how to identify performance bottlenecks in an Azure-hosted application by using server-side metrics. 
> [!div class="nextstepaction"] -> [Identify performance bottlenecks](./tutorial-identify-bottlenecks-azure-portal.md) \ No newline at end of file +> [Identify performance bottlenecks](./tutorial-identify-bottlenecks-azure-portal.md) diff --git a/articles/load-testing/tutorial-cicd-github-actions.md b/articles/load-testing/tutorial-cicd-github-actions.md index 60e2dc53364d..6f93602183d9 100644 --- a/articles/load-testing/tutorial-cicd-github-actions.md +++ b/articles/load-testing/tutorial-cicd-github-actions.md @@ -1,25 +1,27 @@ --- -title: 'Tutorial: Identify performance regressions with Azure Load Testing and GitHub Actions' +title: 'Tutorial: Automate regression testing with GitHub Actions' titleSuffix: Azure Load Testing description: 'In this tutorial, you learn how to automate performance regression testing by using Azure Load Testing and GitHub Actions CI/CD workflows.' services: load-testing ms.service: load-testing ms.author: ninallam author: ninallam -ms.date: 03/28/2022 +ms.date: 05/30/2022 ms.topic: tutorial #Customer intent: As an Azure user, I want to learn how to automatically test builds for performance regressions on every pull request and/or deployment by using GitHub Actions. --- # Tutorial: Identify performance regressions with Azure Load Testing Preview and GitHub Actions -This tutorial describes how to automate performance regression testing by using Azure Load Testing Preview and GitHub Actions. You'll set up a GitHub Actions CI/CD workflow to deploy a sample Node.js application on Azure and trigger a load test using the [Azure Load Testing action](https://github.com/marketplace/actions/azure-load-testing). Once the load test finishes, you'll use the Azure Load Testing dashboard to identify performance issues. +This tutorial describes how to automate performance regression testing with Azure Load Testing Preview and GitHub Actions. -You'll deploy a sample Node.js web app on Azure App Service. 
The web app uses Azure Cosmos DB for storing the data. The sample application also contains an Apache JMeter script to load test three APIs. +You'll set up a GitHub Actions CI/CD workflow to deploy a sample Node.js application on Azure and trigger a load test using the [Azure Load Testing action](https://github.com/marketplace/actions/azure-load-testing). -If you're using Azure Pipelines for your CI/CD workflows, see the corresponding [Azure Pipelines tutorial](./tutorial-cicd-azure-pipelines.md). +You'll then define test failure criteria to ensure the application meets your goals. When a criterion isn't met, the CI/CD pipeline will fail. For more information, see [Define load test failure criteria](./how-to-define-test-criteria.md). + +Finally, you'll make the load test configurable by passing parameters from the CI/CD pipeline to the JMeter script. For example, you could use a GitHub secret to pass an authentication token to the script. For more information, see [Parameterize load tests with secrets and environment variables](./how-to-parameterize-load-tests.md). -Learn more about the [key concepts for Azure Load Testing](./concept-load-testing-concepts.md). +If you're using Azure Pipelines for your CI/CD workflows, see the corresponding [Azure Pipelines tutorial](./tutorial-cicd-azure-pipelines.md). You'll learn how to: @@ -41,7 +43,7 @@ You'll learn how to: ## Set up the sample application repository -To get started with this tutorial, you first need to set up a sample Node.js web application. The sample application contains a GitHub Actions workflow definition to deploy the application on Azure and trigger a load test. +To get started with this tutorial, you first need to set up a sample Node.js web application. The sample application repository contains a GitHub Actions workflow definition that deploys the Node.js application on Azure and then triggers a load test. 
[!INCLUDE [azure-load-testing-set-up-sample-application](../../includes/azure-load-testing-set-up-sample-application.md)] @@ -70,7 +72,7 @@ First, you'll create an Azure Active Directory [service principal](../active-dir > [!NOTE] > Azure Login supports multiple ways to authenticate with Azure. For other authentication options, see the [Azure and GitHub integration site](/azure/developer/github). - The output is the role assignment credentials that provide access to your resource. The command should output a JSON object similar to this. + The output is the role assignment credentials that provide access to your resource. The command outputs a JSON object similar to the following snippet. ```json { @@ -82,9 +84,9 @@ First, you'll create an Azure Active Directory [service principal](../active-dir } ``` -1. Copy this JSON object, which you can use to authenticate from GitHub. +1. Copy this JSON object. You'll store this value as a GitHub secret in a later step. -1. Grant permissions to the service principal to create and run tests with Azure Load Testing. The **Load Test Contributor** role grants permissions to create, manage and run tests in an Azure Load Testing resource. +1. Assign the service principal the **Load Test Contributor** role, which grants permission to create, manage and run tests in an Azure Load Testing resource. First, retrieve the ID of the service principal object by running this Azure CLI command: @@ -92,7 +94,9 @@ First, you'll create an Azure Active Directory [service principal](../active-dir az ad sp list --filter "displayname eq 'my-load-test-cicd'" -o table ``` - Next, run the following Azure CLI command to assign the *Load Test Contributor* role to the service principal. + Next, assign the **Load Test Contributor** role to the service principal. + + Replace the placeholder text `` with the `ObjectId` value from the previous Azure CLI command. Also, replace `` with your Azure subscription ID. 
```azurecli az role assignment create --assignee "" \ @@ -100,12 +104,14 @@ First, you'll create an Azure Active Directory [service principal](../active-dir --scope /subscriptions//resourceGroups/ \ --subscription "" ``` - + In the previous command, replace the placeholder text `` with the `ObjectId` value from the previous Azure CLI command. Also, replace `` with your Azure subscription ID. +You now have a service principal that has the necessary permissions to create and run a load test. + ### Configure the GitHub secret -You'll add a GitHub secret **AZURE_CREDENTIALS** to your repository for the service principal you created in the previous step. The Azure Login action in the GitHub Actions workflow uses this secret to authenticate with Azure. +Next, add a GitHub secret **AZURE_CREDENTIALS** to your repository to store the service principal you created earlier. You'll pass this GitHub secret to the Azure Login action to authenticate with Azure. 1. In [GitHub](https://github.com), browse to your forked repository, select **Settings** > **Secrets** > **New repository secret**. @@ -117,7 +123,7 @@ You'll add a GitHub secret **AZURE_CREDENTIALS** to your repository for the serv ### Authenticate with Azure -You can now use the `AZURE_CREDENTIALS` secret with the Azure Login action in your CI/CD workflow. The *workflow.yml* file in the sample application already has the necessary configuration: +You can now use the `AZURE_CREDENTIALS` secret with the Azure Login action in your CI/CD workflow. The *.github/workflows/workflow.yml* file in the sample application repository already has this configuration: ```yml jobs: @@ -138,13 +144,31 @@ jobs: creds: ${{ secrets.AZURE_CREDENTIALS }} ``` -You've now authorized your GitHub Actions workflow to access your Azure Load Testing resource. You'll now configure the CI/CD workflow to run a load test by using Azure Load Testing. +You've now authorized your GitHub Actions workflow to access your Azure Load Testing resource. 
You'll now configure the CI/CD workflow to run a load test with Azure Load Testing. ## Configure the GitHub Actions workflow to run a load test -In this section, you'll set up a GitHub Actions workflow that triggers the load test. The sample application repository contains a workflow file *SampleApp.yaml*. The workflow first deploys the sample web application to Azure App Service, and then invokes the load test by using the [Azure Load Testing Action](https://github.com/marketplace/actions/azure-load-testing). The GitHub Actions uses an environment variable to pass the URL of the web application to the Apache JMeter script. +In this section, you'll set up a GitHub Actions workflow that triggers the load test by using the [Azure Load Testing Action](https://github.com/marketplace/actions/azure-load-testing). -The GitHub Actions workflow performs the following steps for every update to the main branch: +The following code snippet shows an example of how to trigger a load test using the `azure/load-testing` action: + +```yml +- name: 'Azure Load Testing' +uses: azure/load-testing@v1 +with: + loadTestConfigFile: 'my-jmeter-script.jmx' + loadTestResource: my-load-test-resource + resourceGroup: my-resource-group + env: | + [ + { + "name": "webapp", + "value": "my-web-app.azurewebsites.net" + } + ] +``` + +The sample application repository already contains a sample workflow file *.github/workflows/workflow.yml*. The GitHub Actions workflow performs the following steps for every update to the main branch: - Deploy the sample Node.js application to an Azure App Service web app. - Create an Azure Load Testing resource using the *ARMTemplate/template.json* Azure Resource Manager (ARM) template, if the resource doesn't exist yet. Learn more about ARM templates [here](../azure-resource-manager/templates/overview.md). 
@@ -169,17 +193,15 @@ Follow these steps to configure the GitHub Actions workflow for your environment LOAD_TEST_RESOURCE_GROUP: "" ``` - These variables are used to configure the GitHub actions for deploying the sample application to Azure, and to connect to your Azure Load Testing resource. + These variables are used to configure the GitHub Actions for deploying the sample application to Azure, and to connect to your Azure Load Testing resource. 1. Commit your changes directly to the main branch. - :::image type="content" source="./media/tutorial-cicd-github-actions/commit-workflow.png" alt-text="Screenshot that shows selections for committing changes to the GitHub Actions workflow file."::: - The commit will trigger the GitHub Actions workflow in your repository. You can verify that the workflow is running by going to the **Actions** tab. ## View load test results -To view the results of the load test in the GitHub Actions workflow log: +When the load test finishes, view the results in the GitHub Actions workflow log: 1. Select the **Actions** tab in your GitHub repository to view the list of workflow runs. @@ -194,12 +216,14 @@ To view the results of the load test in the GitHub Actions workflow log: 1. On the screen that shows the workflow run's details, select the **loadTestResults** artifact to download the result files for the load test. :::image type="content" source="./media/tutorial-cicd-github-actions/github-actions-artifacts.png" alt-text="Screenshot that shows artifacts of the workflow run."::: - + ## Define test pass/fail criteria -In this section, you'll add criteria to determine whether your load test passes or fails. If at least one of the pass/fail criteria evaluates to `true`, the load test is unsuccessful. +You can use test failure criteria to define thresholds for when a load test should fail. For example, a test might fail when the percentage of failed requests surpasses a specific value. 
+ +When at least one of the failure criteria is met, the load test status is failed. As a result, the CI/CD workflow will also fail and the development team can be alerted. -You can specify these criteria in the test configuration YAML file: +You can specify these criteria in the [test configuration YAML file](./reference-test-config-yaml.md): 1. Edit the *SampleApp.yml* file in your GitHub repository. @@ -244,13 +268,13 @@ You can specify these criteria in the test configuration YAML file: ## Pass parameters to your load tests from the workflow -Next, you'll parameterize your load test by using workflow variables. These parameters can be secrets, such as passwords, or non-secrets. +Next, you'll parameterize your load test by using workflow variables. These parameters can be secrets, such as passwords, or non-secrets. For more information, see [Parameterize load tests with secrets and environment variables](./how-to-parameterize-load-tests.md). -In this tutorial, you'll reconfigure the sample application to accept only secure requests. To send a secure request, you need to pass a secret value in the HTTP request: +In this tutorial, you'll now use the *SampleApp_Secrets.jmx* JMeter test script. This script invokes an application endpoint that requires a secure value to be passed as an HTTP header. -1. Edit the *SampleApp.yaml* file in your GitHub repository. +1. Edit the *SampleApp.yaml* file in your GitHub repository and update the `testPlan` configuration setting to use the *SampleApp_Secrets.jmx* file. - Update the `testPlan` configuration setting to use the *SampleApp_Secrets.jmx* file: + The `testPlan` setting specifies which JMeter script Azure Load Testing uses. ```yml version: v0.1 @@ -308,6 +332,7 @@ In this tutorial, you'll reconfigure the sample application to accept only secur You've now created a GitHub Actions workflow that uses Azure Load Testing for automatically running load tests. 
By using pass/fail criteria, you can set the status of the CI/CD workflow. With parameters, you can make the running of load tests configurable. +* Learn more about the [key concepts for Azure Load Testing](./concept-load-testing-concepts.md). * Learn more about the [Azure Load Testing Action](https://github.com/marketplace/actions/azure-load-testing). * Learn how to [parameterize a load test](./how-to-parameterize-load-tests.md). * Learn how to [define test pass/fail criteria](./how-to-define-test-criteria.md). \ No newline at end of file diff --git a/articles/logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md b/articles/logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md index 6db741669a47..6f4862923eec 100644 --- a/articles/logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md +++ b/articles/logic-apps/concepts-schedule-automated-recurring-tasks-workflows.md @@ -1,11 +1,11 @@ --- -title: Schedules for recurring triggers in workflows -description: An overview about scheduling recurring automated workflows in Azure Logic Apps. +title: About schedules for recurring triggers in workflows +description: An overview about schedules for recurring workflows in Azure Logic Apps. services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: conceptual -ms.date: 03/17/2022 +ms.date: 05/27/2022 --- # Schedules for recurring triggers in Azure Logic Apps workflows @@ -52,12 +52,14 @@ Here are the differences between these triggers: If you select **Day** as the frequency, you can specify the hours of the day and minutes of the hour, for example, every day at 2:30. If you select **Week** as the frequency, you can also select days of the week, such as Wednesday and Saturday. You can also specify a start date and time along with a time zone for your recurrence schedule. 
For more information about time zone formatting, see [Add a Recurrence trigger](../connectors/connectors-native-recurrence.md#add-the-recurrence-trigger). > [!IMPORTANT] - > If you use the **Day** or **Week** frequency and specify a future date and time, make sure that you set up the recurrence in advance: + > If you use the **Day**, **Week**, or **Month** frequency, and you specify a future date and time, make sure that you set up the recurrence in advance: > > * **Day**: Set up the daily recurrence at least 24 hours in advance. > > * **Week**: Set up the weekly recurrence at least 7 days in advance. - > + > + > * **Month**: Set up the monthly recurrence at least one month in advance. + > > Otherwise, the workflow might skip the first recurrence. > > If a recurrence doesn't specify a specific [start date and time](#start-time), the first recurrence runs immediately @@ -66,7 +68,7 @@ Here are the differences between these triggers: > > If a recurrence doesn't specify any other advanced scheduling options such as specific times to run future recurrences, > those recurrences are based on the last run time. As a result, the start times for those recurrences might drift due to - > factors such as latency during storage calls. To make sure that your logic app doesn't miss a recurrence, especially when + > factors such as latency during storage calls. To make sure that your workflow doesn't miss a recurrence, especially when > the frequency is in days or longer, try these options: > > * Provide a start date and time for the recurrence plus the specific times when to run subsequent recurrences by using the properties @@ -100,7 +102,7 @@ Here are some patterns that show how you can control recurrence with the start d |------------|-----------------------------|----------------------------------------------------| | {none} | Runs the first workload instantly.

    Runs future workloads based on the last run time. | Runs the first workload instantly.

    Runs future workloads based on the specified schedule. | | Start time in the past | **Recurrence** trigger: Calculates run times based on the specified start time and discards past run times.

    Runs the first workload at the next future run time.

    Runs future workloads based on the last run time.

    **Sliding Window** trigger: Calculates run times based on the specified start time and honors past run times.

    Runs future workloads based on the specified start time.

    For more explanation, see the example following this table. | Runs the first workload *no sooner* than the start time, based on the schedule calculated from the start time.

    Runs future workloads based on the specified schedule.

    **Note:** If you specify a recurrence with a schedule, but don't specify hours or minutes for the schedule, Azure Logic Apps calculates future run times by using the hours or minutes, respectively, from the first run time. | -| Start time now or in the future | Runs the first workload at the specified start time.

    **Recurrence** trigger: Runs future workloads based on the last run time.

    **Sliding Window** trigger: Runs future workloads based on the specified start time. | Runs the first workload *no sooner* than the start time, based on the schedule calculated from the start time.

    Runs future workloads based on the specified schedule. If you use the **Day** or **Week** frequency and specify a future date and time, make sure that you set up the recurrence in advance:

    - **Day**: Set up the daily recurrence at least 24 hours in advance.

    - **Week**: Set up the weekly recurrence at least 7 days in advance.

    Otherwise, the workflow might skip the first recurrence.

    **Note:** If you specify a recurrence with a schedule, but don't specify hours or minutes for the schedule, Azure Logic Apps calculates future run times by using the hours or minutes, respectively, from the first run time. | +| Start time now or in the future | Runs the first workload at the specified start time.

    **Recurrence** trigger: Runs future workloads based on the last run time.

    **Sliding Window** trigger: Runs future workloads based on the specified start time. | Runs the first workload *no sooner* than the start time, based on the schedule calculated from the start time.

    Runs future workloads based on the specified schedule. If you use the **Day**, **Week**, or **Month** frequency, and you specify a future date and time, make sure that you set up the recurrence in advance:

    - **Day**: Set up the daily recurrence at least 24 hours in advance.

    - **Week**: Set up the weekly recurrence at least 7 days in advance.

    - **Month**: Set up the monthly recurrence at least one month in advance.

    Otherwise, the workflow might skip the first recurrence.

    **Note:** If you specify a recurrence with a schedule, but don't specify hours or minutes for the schedule, Azure Logic Apps calculates future run times by using the hours or minutes, respectively, from the first run time. | |||| *Example for past start time and recurrence but no schedule* diff --git a/articles/logic-apps/create-managed-service-identity.md b/articles/logic-apps/create-managed-service-identity.md index d0f0592b4d85..747e55960809 100644 --- a/articles/logic-apps/create-managed-service-identity.md +++ b/articles/logic-apps/create-managed-service-identity.md @@ -707,7 +707,7 @@ As a specific example, suppose that you want to run the [Snapshot Blob operation > [!IMPORTANT] > To access Azure storage accounts behind firewalls by using HTTP requests and managed identities, -> make sure that you also set up your storage account with the [exception that allows access by trusted Microsoft services](../connectors/connectors-create-api-azureblobstorage.md#access-blob-storage-with-managed-identities). +> make sure that you also set up your storage account with the [exception that allows access by trusted Microsoft services](../connectors/connectors-create-api-azureblobstorage.md#access-blob-storage-in-same-region-with-managed-identities). 
To run the [Snapshot Blob operation](/rest/api/storageservices/snapshot-blob), the HTTP action specifies these properties: diff --git a/articles/logic-apps/logic-apps-enterprise-integration-certificates.md b/articles/logic-apps/logic-apps-enterprise-integration-certificates.md index 432eddc240ea..a838e6c90d8e 100644 --- a/articles/logic-apps/logic-apps-enterprise-integration-certificates.md +++ b/articles/logic-apps/logic-apps-enterprise-integration-certificates.md @@ -24,7 +24,7 @@ You can use the following certificate types in your workflows: * [Public certificates](https://en.wikipedia.org/wiki/Public_key_certificate), which you must purchase from a public internet [certificate authority (CA)](https://en.wikipedia.org/wiki/Certificate_authority). These certificates don't require any keys. -* Private certificates or [*self-signed certificates*](https://en.wikipedia.org/wiki/Self-signed_certificate), which you create and issue yourself. However, these certificates require private keys. +* Private certificates or [*self-signed certificates*](https://en.wikipedia.org/wiki/Self-signed_certificate), which you create and issue yourself. However, these certificates require [private keys in an Azure key vault](#prerequisites). If you're new to logic apps, review [What is Azure Logic Apps](logic-apps-overview.md)? For more information about B2B enterprise integration, review [B2B enterprise integration workflows with Azure Logic Apps and Enterprise Integration Pack](logic-apps-enterprise-integration-overview.md). @@ -58,7 +58,7 @@ If you're new to logic apps, review [What is Azure Logic Apps](logic-apps-overvi [!INCLUDE [updated-for-az](../../includes/updated-for-az.md)] - * [Add a corresponding public certificate](#add-public-certificate) to your key vault. This certificate appears in your [agreement's **Send** and **Receive** settings for signing and encrypting messages](logic-apps-enterprise-integration-agreements.md). 
For example, review [Reference for AS2 messages settings in Azure Logic Apps](logic-apps-enterprise-integration-as2-message-settings.md). + * [Add the corresponding public certificate](#add-public-certificate) to your key vault. This certificate appears in your [agreement's **Send** and **Receive** settings for signing and encrypting messages](logic-apps-enterprise-integration-agreements.md). For example, review [Reference for AS2 messages settings in Azure Logic Apps](logic-apps-enterprise-integration-as2-message-settings.md). * At least two [trading partners](logic-apps-enterprise-integration-partners.md) and an [agreement between those partners](logic-apps-enterprise-integration-agreements.md) in your integration account. An agreement requires a host partner and a guest partner. Also, an agreement requires that both partners use the same or compatible *business identity* qualifier that's appropriate for an AS2, X12, EDIFACT, or RosettaNet agreement. @@ -66,7 +66,7 @@ If you're new to logic apps, review [What is Azure Logic Apps](logic-apps-overvi -## Add a public certificate +## Use a public certificate To use a *public certificate* in your workflow, you have to first add the certificate to your integration account. @@ -84,7 +84,7 @@ To use a *public certificate* in your workflow, you have to first add the certif |----------|----------|-------|-------------| | **Name** | Yes | <*certificate-name*> | Your certificate's name, which is `publicCert` in this example | | **Certificate Type** | Yes | **Public** | Your certificate's type | - | **Certificate** | Yes | <*certificate-file-name*> | To browse for the certificate file that you want to add, select the folder icon next to the **Certificate** box. | + | **Certificate** | Yes | <*certificate-file-name*> | To browse for the certificate file that you want to add, select the folder icon next to the **Certificate** box. Select the certificate that you want to use. 
| ||||| ![Screenshot showing the Azure portal and integration account with "Add" selected and the "Add Certificate" pane with public certificate details.](media/logic-apps-enterprise-integration-certificates/public-certificate-details.png) @@ -95,11 +95,11 @@ To use a *public certificate* in your workflow, you have to first add the certif ![Screenshot showing the Azure portal and integration account with the public certificate in the "Certificates" list.](media/logic-apps-enterprise-integration-certificates/new-public-certificate.png) - + -## Add a private certificate +## Use a private certificate -To use a *private certificate* in your workflow, you have to first add the certificate to your integration account. Make sure that you've also met the [prerequisites private certificates](#prerequisites). +To use a *private certificate* in your workflow, you have to first meet the [prerequisites for private keys](#prerequisites), and add a public certificate to your integration account. 1. In the [Azure portal](https://portal.azure.com) search box, enter `integration accounts`, and select **Integration accounts**. @@ -115,7 +115,7 @@ To use a *private certificate* in your workflow, you have to first add the certi |----------|----------|-------|-------------| | **Name** | Yes | <*certificate-name*> | Your certificate's name, which is `privateCert` in this example | | **Certificate Type** | Yes | **Private** | Your certificate's type | - | **Certificate** | Yes | <*certificate-file-name*> | To browse for the certificate file that you want to add, select the folder icon next to the **Certificate** box. In the key vault that contains your private key, the file you add there is the public certificate. | + | **Certificate** | Yes | <*certificate-file-name*> | To browse for the certificate file that you want to add, select the folder icon next to the **Certificate** box. Select the public certificate that corresponds to the private key that's stored in your key vault. 
| | **Resource Group** | Yes | <*integration-account-resource-group*> | Your integration account's resource group, which is `Integration-Account-RG` in this example | | **Key Vault** | Yes | <*key-vault-name*> | Your key vault name | | **Key name** | Yes | <*key-name*> | Your key name | diff --git a/articles/logic-apps/media/logic-apps-enterprise-integration-certificates/private-certificate-details.png b/articles/logic-apps/media/logic-apps-enterprise-integration-certificates/private-certificate-details.png index 7b89d4714d07..9847bd98d5b6 100644 Binary files a/articles/logic-apps/media/logic-apps-enterprise-integration-certificates/private-certificate-details.png and b/articles/logic-apps/media/logic-apps-enterprise-integration-certificates/private-certificate-details.png differ diff --git a/articles/machine-learning/how-to-configure-auto-train.md b/articles/machine-learning/how-to-configure-auto-train.md index 921b16d2e17a..3338a416dbfa 100644 --- a/articles/machine-learning/how-to-configure-auto-train.md +++ b/articles/machine-learning/how-to-configure-auto-train.md @@ -104,16 +104,15 @@ The following shows two ways of creating an MLTable. ```Python from azure.ai.ml.constants import AssetTypes -from azure.ai.ml import automl -from azure.ai.ml.entities import JobInput +from azure.ai.ml import automl, Input # A. Create MLTable for training data from your local directory -my_training_data_input = JobInput( +my_training_data_input = Input( type=AssetTypes.MLTABLE, path="./data/training-mltable-folder" ) # B. 
Remote MLTable definition -my_training_data_input = JobInput(type=AssetTypes.MLTABLE, path="azureml://datastores/workspaceblobstore/paths/Classification/Train") +my_training_data_input = Input(type=AssetTypes.MLTABLE, path="azureml://datastores/workspaceblobstore/paths/Classification/Train") ``` ### Training, validation, and test data diff --git a/articles/machine-learning/how-to-create-register-data-assets.md b/articles/machine-learning/how-to-create-register-data-assets.md index 8a08d98323df..01a1b2877798 100644 --- a/articles/machine-learning/how-to-create-register-data-assets.md +++ b/articles/machine-learning/how-to-create-register-data-assets.md @@ -117,7 +117,7 @@ For a complete example, see the [working_with_uris.ipynb notebook](https://githu # [Python-SDK](#tab/Python-SDK) ```python from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml.constants import AssetTypes # select one from: my_path = 'abfss://@.dfs.core.windows.net/' # adls gen2 @@ -171,19 +171,20 @@ path: wasbs://mainstorage9c05dabf5c924.blob.core.windows.net/azureml-blobstore-5 ### Consume registered URI Folder data assets in job ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data +from azure.ai.ml.constants import AssetTypes registered_data_asset = ml_client.data.get(name='titanic', version='1') my_job_inputs = { - "input_data": JobInput( + "input_data": Input( type=AssetTypes.URI_FOLDER, path=registered_data_asset.id ) } -job = CommandJob( +job = command( code="./src", command='python read_data_asset.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -201,7 +202,7 @@ returned_job.services["Studio"].endpoint # [Python-SDK](#tab/Python-SDK) ```python from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml.constants import 
AssetTypes # select one from: my_file_path = '/' # local @@ -270,7 +271,7 @@ Below we show an example of versioning the sample data in this repo. The data is # [Python-SDK](#tab/Python-SDK) ```python from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml.constants import AssetTypes import mltable my_data = Data( diff --git a/articles/machine-learning/how-to-manage-models.md b/articles/machine-learning/how-to-manage-models.md index e502268d1b85..487cc7f10f85 100644 --- a/articles/machine-learning/how-to-manage-models.md +++ b/articles/machine-learning/how-to-manage-models.md @@ -128,7 +128,7 @@ Use the tabs below to select where your model is located. ```python from azure.ai.ml.entities import Model -from azure.ai.ml._constants import ModelType +from azure.ai.ml.constants import ModelType file_model = Model( path="mlflow-model/model.pkl", @@ -146,7 +146,7 @@ A model can be created from a cloud path using any one of the following supporte ```python from azure.ai.ml.entities import Model -from azure.ai.ml._constants import ModelType +from azure.ai.ml.constants import ModelType cloud_model = Model( path= "azureml://datastores/workspaceblobstore/paths/model.pkl" @@ -173,7 +173,7 @@ Example: ```python from azure.ai.ml.entities import Model -from azure.ai.ml._constants import ModelType +from azure.ai.ml.constants import ModelType run_model = Model( path="runs:/$RUN_ID/model/" @@ -205,7 +205,7 @@ Saving model from a named output: ```python from azure.ai.ml.entities import Model -from azure.ai.ml._constants import ModelType +from azure.ai.ml.constants import ModelType run_model = Model( path="azureml://jobs/$RUN_ID/outputs/artifacts/paths/model/" diff --git a/articles/machine-learning/how-to-read-write-data-v2.md b/articles/machine-learning/how-to-read-write-data-v2.md index 87ef6b535819..82a0ceeb4fc8 100644 --- a/articles/machine-learning/how-to-read-write-data-v2.md +++ 
b/articles/machine-learning/how-to-read-write-data-v2.md @@ -44,8 +44,8 @@ ml_client = MLClient(InteractiveBrowserCredential(), subscription_id, resource_g ## Read local data in a job -You can use data from your current working directory in a training job with the JobInput class. -The JobInput class allows you to define data inputs from a specific file, `uri_file` or a folder location, `uri_folder`. In the JobInput object, you specify the `path` of where your data is located; the path can be a local path or a cloud path. Azure Machine Learning supports `https://`, `abfss://`, `wasbs://` and `azureml://` URIs. +You can use data from your current working directory in a training job with the Input class. +The Input class allows you to define data inputs from a specific file, `uri_file` or a folder location, `uri_folder`. In the Input object, you specify the `path` of where your data is located; the path can be a local path or a cloud path. Azure Machine Learning supports `https://`, `abfss://`, `wasbs://` and `azureml://` URIs. > [!IMPORTANT] > If the path is local, but your compute is defined to be in the cloud, Azure Machine Learning will automatically upload the data to cloud storage for you. 
@@ -54,17 +54,18 @@ The JobInput class allows you to define data inputs from a specific file, `uri_f # [Python-SDK](#tab/Python-SDK) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data +from azure.ai.ml.constants import AssetTypes my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='./sample_data', # change to be your local directory type=AssetTypes.URI_FOLDER ) } -job = CommandJob( +job = command( code="./src", # local path where the code is stored command='python train.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -114,17 +115,18 @@ The following code shows how to read in uri_folder type data from Azure Data Lak ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data +from azure.ai.ml.constants import AssetTypes my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='abfss://@.dfs.core.windows.net/', # Blob: 'https://.blob.core.windows.net//path' type=AssetTypes.URI_FOLDER ) } -job = CommandJob( +job = command( code="./src", # local path where the code is stored command='python train.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -164,7 +166,7 @@ compute: azureml:cpu-cluster You can read and write data from your job into your cloud-based storage. -The JobInput defaults the mode - how the input will be exposed during job runtime - to InputOutputModes.RO_MOUNT (read-only mount). Put another way, Azure Machine Learning will mount the file or folder to the compute and set the file/folder to read-only. By design, you can't write to JobInputs only JobOutputs. The data is automatically uploaded to cloud storage. 
+The Input defaults the mode - how the input will be exposed during job runtime - to InputOutputModes.RO_MOUNT (read-only mount). Put another way, Azure Machine Learning will mount the file or folder to the compute and set the file/folder to read-only. By design, you can't write to JobInputs only JobOutputs. The data is automatically uploaded to cloud storage. Matrix of possible types and modes for job inputs and outputs: @@ -185,11 +187,12 @@ As you can see from the table, `eval_download` and `eval_mount` are unique to `m # [Python-SDK](#tab/Python-SDK) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob, JobOutput -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, JobOutput +from azure.ai.ml.constants import AssetTypes my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='abfss://@.dfs.core.windows.net/', type=AssetTypes.URI_FOLDER ) @@ -202,7 +205,7 @@ my_job_outputs = { ) } -job = CommandJob( +job = command( code="./src", #local path where the code is stored command='python pre-process.py --input_folder ${{inputs.input_data}} --output_folder ${{outputs.output_folder}}', inputs=my_job_inputs, @@ -255,7 +258,7 @@ The following example demonstrates versioning of sample data, and shows how to r ```python from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml.constants import AssetTypes my_data = Data( path="./sample_data/titanic.csv", @@ -272,7 +275,7 @@ To register data that is in a cloud location, you can specify the path with any ```python from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml.constants import AssetTypes my_path = 'abfss://@.dfs.core.windows.net/' # adls gen2 @@ -295,19 +298,20 @@ The following example demonstrates how to consume `version` 1 of the registered ```python -from azure.ai.ml.entities import Data, UriReference, 
JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data +from azure.ai.ml.constants import AssetTypes registered_data_asset = ml_client.data.get(name='titanic', version='1') my_job_inputs = { - "input_data": JobInput( + "input_data": Input( type=AssetTypes.URI_FOLDER, path=registered_data_asset.id ) } -job = CommandJob( +job = command( code="./src", command='python read_data_asset.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, diff --git a/articles/machine-learning/how-to-use-data.md b/articles/machine-learning/how-to-use-data.md index acc3e1f7aa7c..792ed19fef41 100644 --- a/articles/machine-learning/how-to-use-data.md +++ b/articles/machine-learning/how-to-use-data.md @@ -85,17 +85,18 @@ Use the tabs below to select where your data is located. When you pass local data, the data is automatically uploaded to cloud storage as part of the job submission. ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data +from azure.ai.ml.constants import AssetTypes my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='./sample_data', # change to be your local directory type=AssetTypes.URI_FOLDER ) } -job = CommandJob( +job = command( code="./src", # local path where the code is stored command='python train.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -112,18 +113,19 @@ returned_job.services["Studio"].endpoint # [ADLS Gen2](#tab/use-adls) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob +from azure.ai.ml.constants import AssetTypes # in this example we my_job_inputs = { - "input_data": JobInput( + 
"input_data": Input( path='abfss://@.dfs.core.windows.net/', type=AssetTypes.URI_FOLDER ) } -job = CommandJob( +job = command( code="./src", # local path where the code is stored command='python train.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -140,18 +142,19 @@ returned_job.services["Studio"].endpoint # [Blob](#tab/use-blob) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob +from azure.ai.ml.constants import AssetTypes # in this example we my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='https://.blob.core.windows.net//path', type=AssetTypes.URI_FOLDER ) } -job = CommandJob( +job = command( code="./src", # local path where the code is stored command='python train.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -174,11 +177,12 @@ Use the tabs below to select where your data is located. 
# [Blob](#tab/rw-blob) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob, JobOutput -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob, JobOutput +from azure.ai.ml.constants import AssetTypes my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='https://.blob.core.windows.net//path', type=AssetTypes.URI_FOLDER ) @@ -191,7 +195,7 @@ my_job_outputs = { ) } -job = CommandJob( +job = command( code="./src", #local path where the code is stored command='python pre-process.py --input_folder ${{inputs.input_data}} --output_folder ${{outputs.output_folder}}', inputs=my_job_inputs, @@ -209,11 +213,12 @@ returned_job.services["Studio"].endpoint # [ADLS Gen2](#tab/rw-adls) ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob, JobOutput -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob, JobOutput +from azure.ai.ml.constants import AssetTypes my_job_inputs = { - "input_data": JobInput( + "input_data": Input( path='abfss://@.dfs.core.windows.net/', type=AssetTypes.URI_FOLDER ) @@ -226,7 +231,7 @@ my_job_outputs = { ) } -job = CommandJob( +job = command( code="./src", #local path where the code is stored command='python pre-process.py --input_folder ${{inputs.input_data}} --output_folder ${{outputs.output_folder}}', inputs=my_job_inputs, @@ -246,7 +251,7 @@ returned_job.services["Studio"].endpoint ```python from azure.ai.ml.entities import Data -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml.constants import AssetTypes # select one from: my_path = 'abfss://@.dfs.core.windows.net/' # adls gen2 @@ -266,19 +271,20 @@ ml_client.data.create_or_update(my_data) ### Consume registered data assets in job ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants 
import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob +from azure.ai.ml.constants import AssetTypes registered_data_asset = ml_client.data.get(name='titanic', version='1') my_job_inputs = { - "input_data": JobInput( + "input_data": Input( type=AssetTypes.URI_FOLDER, path=registered_data_asset.id ) } -job = CommandJob( +job = command( code="./src", command='python read_data_asset.py --input_folder ${{inputs.input_data}}', inputs=my_job_inputs, @@ -342,20 +348,21 @@ inputs: The following example shows how to do this using the v2 SDK: ```python -from azure.ai.ml.entities import Data, UriReference, JobInput, CommandJob -from azure.ai.ml._constants import AssetTypes +from azure.ai.ml import Input, command +from azure.ai.ml.entities import Data, CommandJob +from azure.ai.ml.constants import AssetTypes registered_v1_data_asset = ml_client.data.get(name='', version='') my_job_inputs = { - "input_data": JobInput( + "input_data": Input( type=AssetTypes.MLTABLE, path=registered_v1_data_asset.id, mode="eval_mount" ) } -job = CommandJob( +job = command( code="./src", #local path where the code is stored command='python train.py --input_data ${{inputs.input_data}}', inputs=my_job_inputs, diff --git a/articles/machine-learning/media/how-to-create-labeling-projects/add-label.png b/articles/machine-learning/media/how-to-create-labeling-projects/add-label.png index f9c62a713a07..a14b2cc6c804 100644 Binary files a/articles/machine-learning/media/how-to-create-labeling-projects/add-label.png and b/articles/machine-learning/media/how-to-create-labeling-projects/add-label.png differ diff --git a/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png b/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png index b14c4504ab74..5146a6d826de 100644 Binary files a/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png and 
b/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png differ diff --git a/articles/machine-learning/media/how-to-create-labeling-projects/labeling-creation-wizard.png b/articles/machine-learning/media/how-to-create-labeling-projects/labeling-creation-wizard.png index f9ed5d201151..408b9c63ea1a 100644 Binary files a/articles/machine-learning/media/how-to-create-labeling-projects/labeling-creation-wizard.png and b/articles/machine-learning/media/how-to-create-labeling-projects/labeling-creation-wizard.png differ diff --git a/articles/machine-learning/media/how-to-create-text-labeling-projects/text-labeling-creation-wizard.png b/articles/machine-learning/media/how-to-create-text-labeling-projects/text-labeling-creation-wizard.png index 4228a66b48b8..2126b41c0d75 100644 Binary files a/articles/machine-learning/media/how-to-create-text-labeling-projects/text-labeling-creation-wizard.png and b/articles/machine-learning/media/how-to-create-text-labeling-projects/text-labeling-creation-wizard.png differ diff --git a/articles/machine-learning/media/how-to-create-your-first-pipeline/pipelines.png b/articles/machine-learning/media/how-to-create-your-first-pipeline/pipelines.png index 99522098e04e..153333648aec 100644 Binary files a/articles/machine-learning/media/how-to-create-your-first-pipeline/pipelines.png and b/articles/machine-learning/media/how-to-create-your-first-pipeline/pipelines.png differ diff --git a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/download-output-logs.png b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/download-output-logs.png index a50581366242..0441adadd5ba 100644 Binary files a/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/download-output-logs.png and b/articles/machine-learning/media/how-to-deploy-mlflow-models-online-endpoints/download-output-logs.png differ diff --git 
a/articles/machine-learning/media/how-to-deploy-with-triton/create-option-from-endpoints-page.png b/articles/machine-learning/media/how-to-deploy-with-triton/create-option-from-endpoints-page.png index 4f757eaec7c3..118954fa0eb5 100644 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/create-option-from-endpoints-page.png and b/articles/machine-learning/media/how-to-deploy-with-triton/create-option-from-endpoints-page.png differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift-config-expanded.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift-config-expanded.png deleted file mode 100644 index 97a7d45b479f..000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift-config-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift-config.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift-config.png deleted file mode 100644 index 56cff5bf0a47..000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift-config.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui-expanded.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui-expanded.png deleted file mode 100644 index 8fc3c03d6be4..000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui-expanded.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui.png deleted file mode 100644 index 6c237c9868a8..000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift-ui.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift_config.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift_config.png deleted file 
mode 100644 index 72889fb3c6ec..000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift_config.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift_email.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift_email.png deleted file mode 100644 index e3261e50c639..000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift_email.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift_show.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift_show.png deleted file mode 100644 index dacb13bbbdad..000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift_show.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-monitor-data-drift/drift_ui.png b/articles/machine-learning/media/how-to-monitor-data-drift/drift_ui.png deleted file mode 100644 index 53391cfa4d62..000000000000 Binary files a/articles/machine-learning/media/how-to-monitor-data-drift/drift_ui.png and /dev/null differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details.png index b73a5170ae89..ada14872a10c 100644 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details.png and b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details.png differ diff --git a/articles/machine-learning/media/how-to-use-event-grid/specify-adf-pipeline.png b/articles/machine-learning/media/how-to-use-event-grid/specify-adf-pipeline.png index e0497a5eaf4a..ca3242a0ad6d 100644 Binary files a/articles/machine-learning/media/how-to-use-event-grid/specify-adf-pipeline.png and b/articles/machine-learning/media/how-to-use-event-grid/specify-adf-pipeline.png differ diff --git 
a/articles/machine-learning/media/how-to-version-track-datasets/dataset-models.png b/articles/machine-learning/media/how-to-version-track-datasets/dataset-models.png index a2900057df23..94874ca23fa8 100644 Binary files a/articles/machine-learning/media/how-to-version-track-datasets/dataset-models.png and b/articles/machine-learning/media/how-to-version-track-datasets/dataset-models.png differ diff --git a/articles/machine-learning/media/migrate-rebuild-web-service/test-realtime-endpoint.png b/articles/machine-learning/media/migrate-rebuild-web-service/test-realtime-endpoint.png index 862732a9b783..35d1a2a6c096 100644 Binary files a/articles/machine-learning/media/migrate-rebuild-web-service/test-realtime-endpoint.png and b/articles/machine-learning/media/migrate-rebuild-web-service/test-realtime-endpoint.png differ diff --git a/articles/machine-learning/media/overview-what-is-azure-ml-studio/azure-machine-learning-automated-ml-ui.jpg b/articles/machine-learning/media/overview-what-is-azure-ml-studio/azure-machine-learning-automated-ml-ui.jpg index 9a2cd6985f50..3128147aafae 100644 Binary files a/articles/machine-learning/media/overview-what-is-azure-ml-studio/azure-machine-learning-automated-ml-ui.jpg and b/articles/machine-learning/media/overview-what-is-azure-ml-studio/azure-machine-learning-automated-ml-ui.jpg differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-sub-folder.png b/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-sub-folder.png index 28dd6be826c4..3e5e2ca6e357 100644 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-sub-folder.png and b/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-sub-folder.png differ diff --git a/articles/machine-learning/media/tutorial-1st-r-experiment/clone-folder.png b/articles/machine-learning/media/tutorial-1st-r-experiment/clone-folder.png deleted file mode 100644 index 
16184d85c3ec..000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-r-experiment/clone-folder.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-1st-r-experiment/rstudio.png b/articles/machine-learning/media/tutorial-1st-r-experiment/rstudio.png deleted file mode 100644 index 2d6c6dcc52b7..000000000000 Binary files a/articles/machine-learning/media/tutorial-1st-r-experiment/rstudio.png and /dev/null differ diff --git a/articles/machine-learning/media/tutorial-create-secure-workspace/studio-new-compute-cluster.png b/articles/machine-learning/media/tutorial-create-secure-workspace/studio-new-compute-cluster.png index 6fe38ad38728..e9b88bfc9c10 100644 Binary files a/articles/machine-learning/media/tutorial-create-secure-workspace/studio-new-compute-cluster.png and b/articles/machine-learning/media/tutorial-create-secure-workspace/studio-new-compute-cluster.png differ diff --git a/articles/machine-learning/reference-yaml-overview.md b/articles/machine-learning/reference-yaml-overview.md index da1c1422a97a..0d945d05a50f 100644 --- a/articles/machine-learning/reference-yaml-overview.md +++ b/articles/machine-learning/reference-yaml-overview.md @@ -53,7 +53,7 @@ The Azure Machine Learning CLI (v2), an extension to the Azure CLI, often uses a | [Compute cluster (AmlCompute)](reference-yaml-compute-aml.md) | https://azuremlschemas.azureedge.net/latest/amlCompute.schema.json | | [Compute instance](reference-yaml-compute-instance.md) | https://azuremlschemas.azureedge.net/latest/computeInstance.schema.json | | [Attached Virtual Machine](reference-yaml-compute-vm.md) | https://azuremlschemas.azureedge.net/latest/vmCompute.schema.json | -| [Attached Azure Arc-enabled Kubernetes (KubernetesCompute)](reference-yaml-compute-kubernetes.md) | https://azuremlschemas.azureedge.net/latest/kubernetesCompute.schema.json | +| [Attached Azure Arc-enabled Kubernetes (KubernetesCompute)](reference-yaml-compute-kubernetes.md) | 
`https://azuremlschemas.azureedge.net/latest/kubernetesCompute.schema.json` | ## Job diff --git a/articles/machine-learning/toc.yml b/articles/machine-learning/toc.yml index e9357e8a0aaa..ff0ab4955db1 100644 --- a/articles/machine-learning/toc.yml +++ b/articles/machine-learning/toc.yml @@ -373,6 +373,22 @@ - name: Administrate data authentication displayName: data authentication href: how-to-administrate-data-authentication.md + - name: Label data + items: + - name: Set up image labeling + displayName: data, dataset + href: how-to-create-image-labeling-projects.md + - name: Set up text labeling + displayName: data, dataset + href: how-to-create-text-labeling-projects.md + - name: Label images and text + displayName: data, dataset, labeling + href: how-to-label-data.md + - name: Add users + displayName: data, dataset, labeling + href: how-to-add-users.md + - name: Outsource labeling tasks + href: how-to-outsource-data-labeling.md - name: Train models items: - name: Train with the job creation UI diff --git a/articles/machine-learning/v1/toc.yml b/articles/machine-learning/v1/toc.yml index 782b141a1522..c8ed44e7f035 100644 --- a/articles/machine-learning/v1/toc.yml +++ b/articles/machine-learning/v1/toc.yml @@ -129,22 +129,6 @@ - name: Version & track datasets displayName: data, data set href: ../how-to-version-track-datasets.md - - name: Label data - items: - - name: Set up image labeling - displayName: data, dataset - href: ../how-to-create-image-labeling-projects.md - - name: Set up text labeling - displayName: data, dataset - href: ../how-to-create-text-labeling-projects.md - - name: Label images and text - displayName: data, dataset, labeling - href: ../how-to-label-data.md - - name: Add users - displayName: data, dataset, labeling - href: ../how-to-add-users.md - - name: Outsource labeling tasks - href: ../how-to-outsource-data-labeling.md - name: Create datasets with labels displayName: data, labels, torchvision href: ../how-to-use-labeled-dataset.md diff 
--git a/articles/managed-instance-apache-cassandra/TOC.yml b/articles/managed-instance-apache-cassandra/TOC.yml index 35359b9ad0f4..c4e57b150faf 100644 --- a/articles/managed-instance-apache-cassandra/TOC.yml +++ b/articles/managed-instance-apache-cassandra/TOC.yml @@ -42,6 +42,8 @@ href: add-service-principal.md - name: Configure Customer-Managed Keys href: customer-managed-keys.md + - name: Enable LDAP authentication + href: ldap.md - name: Monitor Managed Instance href: monitor-clusters.md - name: Manage with Azure CLI diff --git a/articles/managed-instance-apache-cassandra/dba-commands.md b/articles/managed-instance-apache-cassandra/dba-commands.md index 502d2468ab4e..89a383a30fe7 100644 --- a/articles/managed-instance-apache-cassandra/dba-commands.md +++ b/articles/managed-instance-apache-cassandra/dba-commands.md @@ -13,22 +13,19 @@ ms.author: thvankra Azure Managed Instance for Apache Cassandra provides automated deployment, scaling, and [management operations](management-operations.md) for open-source Apache Cassandra data centers. The automation in the service should be sufficient for many use cases. However, this article describes how to run DBA commands manually when the need arises. > [!IMPORTANT] -> Nodetool commands are in public preview. +> Nodetool and sstable commands are in public preview. > This feature is provided without a service level agreement, and it's not recommended for production workloads. > For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). - - ## DBA command support -Azure Managed Instance for Apache Cassandra allows you to run `nodetool` commands via Azure CLI, for routine DBA administration. Not all commands are supported and there are some limitations. For supported commands, see the sections below. 
+Azure Managed Instance for Apache Cassandra allows you to run `nodetool` and `sstable` commands via Azure CLI, for routine DBA administration. Not all commands are supported and there are some limitations. For supported commands, see the sections below. >[!WARNING] > Some of these commands can destabilize the cassandra cluster and should only be run carefully and after being tested in non-production environments. Where possible a `--dry-run` option should be deployed first. Microsoft cannot offer any SLA or support on issues with running commands which alter the default database configuration and/or tables. -## How to run a nodetool command +## How to run a `nodetool` command Azure Managed Instance for Apache Cassandra provides the following Azure CLI command to run DBA commands: ```azurecli-interactive @@ -59,9 +56,9 @@ Both will return a json of the following form: } ``` - +``` - +* `sstableexpiredblockers` -## List of supported nodetool commands +## List of supported `nodetool` commands For more information on each command, see https://cassandra.apache.org/doc/latest/cassandra/tools/nodetool/nodetool.html diff --git a/articles/managed-instance-apache-cassandra/index.yml b/articles/managed-instance-apache-cassandra/index.yml index 5871b7eaab55..099ba2dcdca0 100644 --- a/articles/managed-instance-apache-cassandra/index.yml +++ b/articles/managed-instance-apache-cassandra/index.yml @@ -67,6 +67,8 @@ landingContent: links: - text: Manage resources with Azure CLI url: manage-resources-cli.md + - text: Enable LDAP Authentication + url: ldap.md - text: Monitor cluster resources url: monitor-clusters.md - text: Configure Customer-Managed Keys diff --git a/articles/managed-instance-apache-cassandra/ldap.md b/articles/managed-instance-apache-cassandra/ldap.md new file mode 100644 index 000000000000..956ec96ff64d --- /dev/null +++ b/articles/managed-instance-apache-cassandra/ldap.md @@ -0,0 +1,133 @@ +--- +title: How to enable LDAP authentication in Azure Managed Instance 
for Apache Cassandra +description: Learn how to enable LDAP authentication in Azure Managed Instance for Apache Cassandra +author: TheovanKraay +ms.author: thvankra +ms.service: managed-instance-apache-cassandra +ms.topic: how-to +ms.date: 05/23/2022 +--- + +# How to enable LDAP authentication in Azure Managed Instance for Apache Cassandra + +Azure Managed Instance for Apache Cassandra provides automated deployment and scaling operations for managed open-source Apache Cassandra data centers. This article discusses how to enable LDAP authentication to your clusters and data centers. + +> [!IMPORTANT] +> LDAP authentication is in public preview. +> This feature is provided without a service level agreement, and it's not recommended for production workloads. +> For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + +## Prerequisites + +- If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. +- An Azure Managed Instance for Apache Cassandra cluster. Review how to [create an Azure Managed Instance for Apache Cassandra cluster from the Azure portal](create-cluster-portal.md). + +## Deploy an LDAP Server in Azure +In this section, we'll walk through creating a simple LDAP server on a Virtual Machine in Azure. If you already have an LDAP server running, you can skip this section and review [how to enable LDAP authentication](ldap.md#enable-ldap-authentication). + +1. Deploy a Virtual Machine in Azure using Ubuntu Server 18.04 LTS. You can follow instructions [here](visualize-prometheus-grafana.md#deploy-an-ubuntu-server). + +1. Give your server a DNS name: + + :::image type="content" source="./media/ldap/dns.jpg" alt-text="Screenshot of virtual machine d n s name in Azure portal." lightbox="./media/ldap/dns.jpg" border="true"::: + +1. Install Docker on the virtual machine. 
We recommend [this](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-18-04) tutorial. + +1. In the home directory, copy and paste the following text and hit enter. This command will create a file containing a test LDAP user account. + + ```shell + mkdir ldap-user && cd ldap-user && cat >> user.ldif <` with the dns name you created for your LDAP server earlier. This command will deploy an LDAP server with TLS enabled to a Docker container, and will also copy the user file you created earlier to the container. + + ```shell + sudo docker run --hostname .uksouth.cloudapp.azure.com --name -v $(pwd)/ldap-user:/container/service/slapd/assets/test --detach osixia/openldap:1.5.0 + ``` + +1. Now copy out the certificates folder from the container (replace `` with the dns name you created for your LDAP server): + + ```shell + sudo docker cp :/container/service/slapd/assets/certs certs + ``` + +1. Verify that dns name is correct: + + ```shell + openssl x509 -in certs/ldap.crt -text + ``` + :::image type="content" source="./media/ldap/dns-verify.jpg" alt-text="Screenshot of output from command to verify certificate." lightbox="./media/ldap/dns-verify.jpg" border="true"::: + +1. Copy the `ldap.crt` file to [clouddrive](../cloud-shell/persisting-shell-storage.md) in Azure CLI for use later. + +1. Add the user to the ldap (replace `` with the dns name you created for your LDAP server): + + ```shell + sudo docker container exec ldapadd -H ldap://.uksouth.cloudapp.azure.com -D "cn=admin,dc=example,dc=org" -w admin -f /container/service/slapd/assets/test/user.ldif + ``` + +## Enable LDAP authentication + +> [!IMPORTANT] +> If you skipped the above section because you already have an existing LDAP server, please ensure that it has server SSL certificates enabled. The `subject alternative name (dns name)` specified for the certificate must also match the domain of the server that LDAP is hosted on, or authentication will fail. + +1. 
Currently, LDAP authentication is a public preview feature. Run the below command to add the required Azure CLI extension: + + ```azurecli-interactive + az extension add --upgrade --name cosmosdb-preview + ``` + +1. Set authentication method to "Ldap" on the cluster, replacing `` and `` with the appropriate values: + + ```azurecli-interactive + az managed-cassandra cluster update -g -c --authentication-method "Ldap" + ``` + +1. Now set properties at the data center level. Replace `` and `` with the appropriate values, and `` with the dns name you created for your LDAP server. + + > [!NOTE] + > The below command is based on the LDAP setup in the earlier section. If you skipped that section because you already have an existing LDAP server, provide the corresponding values for that server instead. Ensure you have uploaded a certificate file like `ldap.crt` to your [clouddrive](../cloud-shell/persisting-shell-storage.md) in Azure CLI. + + ```azurecli-interactive + ldap_search_base_distinguished_name='dc=example,dc=org' + ldap_server_certificates='/usr/csuser/clouddrive/ldap.crt' + ldap_server_hostname='.uksouth.cloudapp.azure.com' + ldap_service_user_distinguished_name='cn=admin,dc=example,dc=org' + ldap_service_user_password='admin' + + az managed-cassandra datacenter update -g `` -c `` -d datacenter-1 --ldap-search-base-dn $ldap_search_base_distinguished_name --ldap-server-certs $ldap_server_certificates --ldap-server-hostname $ldap_server_hostname --ldap-service-user-dn $ldap_service_user_distinguished_name --ldap-svc-user-pwd $ldap_service_user_password + ``` + +1. 
Once this command has completed, you should be able to use [CQLSH](https://cassandra.apache.org/doc/latest/cassandra/tools/cqlsh.html) (see below) or any Apache Cassandra open-source client driver to connect to your managed instance data center with the user added in the above step: + + ```shell + export SSL_VALIDATE=false + cqlsh --debug --ssl -u -p + ``` + +## Next steps + +* [LDAP authentication with Azure Active Directory](../active-directory/fundamentals/auth-ldap.md) +* [Manage Azure Managed Instance for Apache Cassandra resources using Azure CLI](manage-resources-cli.md) +* [Deploy a Managed Apache Spark Cluster with Azure Databricks](deploy-cluster-databricks.md) \ No newline at end of file diff --git a/articles/managed-instance-apache-cassandra/media/ldap/dns-verify.jpg b/articles/managed-instance-apache-cassandra/media/ldap/dns-verify.jpg new file mode 100644 index 000000000000..d1ec71ccdde2 Binary files /dev/null and b/articles/managed-instance-apache-cassandra/media/ldap/dns-verify.jpg differ diff --git a/articles/managed-instance-apache-cassandra/media/ldap/dns.jpg b/articles/managed-instance-apache-cassandra/media/ldap/dns.jpg new file mode 100644 index 000000000000..b1cecfc1be3c Binary files /dev/null and b/articles/managed-instance-apache-cassandra/media/ldap/dns.jpg differ diff --git a/articles/mysql/TOC.yml b/articles/mysql/TOC.yml index d4733e11fe3f..bc22384d5e8a 100644 --- a/articles/mysql/TOC.yml +++ b/articles/mysql/TOC.yml @@ -40,6 +40,8 @@ - name: ARM template displayName: Resource Manager href: flexible-server/quickstart-create-arm-template.md + - name: Terraform + href: flexible-server/quickstart-create-terraform.md - name: Create and manage databases href: flexible-server/how-to-create-manage-databases.md - name: Connect and query diff --git a/articles/mysql/flexible-server/concepts-data-in-replication.md b/articles/mysql/flexible-server/concepts-data-in-replication.md index e4e023b14c9d..3fbd9235fda4 100644 --- 
a/articles/mysql/flexible-server/concepts-data-in-replication.md +++ b/articles/mysql/flexible-server/concepts-data-in-replication.md @@ -48,7 +48,7 @@ Modifying the parameter `replicate_wild_ignore_table` used to create replication - The source server version must be at least MySQL version 5.7. - Our recommendation is to have the same version for source and replica server versions. For example, both must be MySQL version 5.7 or both must be MySQL version 8.0. -- Our recommendation is to have a primary key in each table. If we have table without primary key, you might face slowness in replication. To create primary keys for tables you can use [invisible column](https://dev.mysql.com/doc/refman/8.0/en/invisible-columns.html) if your MySQL version is greater than 8.0.23. +- Our recommendation is to have a primary key in each table. If we have table without primary key, you might face slowness in replication. To create primary keys for tables you can use [invisible column](https://dev.mysql.com/doc/refman/8.0/en/create-table-gipks.html) if your MySQL version is greater than 8.0.23 `(ALTER TABLE ADD COLUMN bigint AUTO_INCREMENT INVISIBLE PRIMARY KEY;)`. - The source server should use the MySQL InnoDB engine. - User must have permissions to configure binary logging and create new users on the source server. - Binary log files on the source server shouldn't be purged before the replica applies those changes. 
If the source is Azure Database for MySQL refer how to configure binlog_expire_logs_seconds for [Flexible server](./concepts-server-parameters.md#binlog_expire_logs_seconds) or [Single server](../concepts-server-parameters.md#binlog_expire_logs_seconds) diff --git a/articles/mysql/flexible-server/concepts-high-availability.md b/articles/mysql/flexible-server/concepts-high-availability.md index 6edfadbe2542..911445bb145d 100644 --- a/articles/mysql/flexible-server/concepts-high-availability.md +++ b/articles/mysql/flexible-server/concepts-high-availability.md @@ -63,7 +63,7 @@ Automatic backups, both snapshots and log backups, are performed on locally redu >[!Note] >For both zone-redundant and same-zone HA: ->* If there's a failure, the time needed for the standby replica to take over the role of primary depends on the binary log application on the standby. So we recommend that you use primary keys on all tables to reduce failover time. Failover times are typically between 60 and 120 seconds.To create primary keys for tables you can use [invisible column](https://dev.mysql.com/doc/refman/8.0/en/invisible-columns.html) if your MySQL version is greater than 8.0.23. +>* If there's a failure, the time needed for the standby replica to take over the role of primary depends on the binary log application on the standby. So we recommend that you use primary keys on all tables to reduce failover time. Failover times are typically between 60 and 120 seconds.To create primary keys for tables you can use [invisible column](https://dev.mysql.com/doc/refman/8.0/en/create-table-gipks.html) if your MySQL version is greater than 8.0.23 `(ALTER TABLE
    ADD COLUMN bigint AUTO_INCREMENT INVISIBLE PRIMARY KEY;)`. >* The standby server isn't available for read or write operations. It's a passive standby to enable fast failover. >* Always use a fully qualified domain name (FQDN) to connect to your primary server. Avoid using an IP address to connect. If there's a failover, after the primary and standby server roles are switched, a DNS A record might change. That change would prevent the application from connecting to the new primary server if an IP address is used in the connection string. diff --git a/articles/mysql/flexible-server/how-to-deploy-on-azure-free-account.md b/articles/mysql/flexible-server/how-to-deploy-on-azure-free-account.md index e2eb3538d939..4bff566ab248 100644 --- a/articles/mysql/flexible-server/how-to-deploy-on-azure-free-account.md +++ b/articles/mysql/flexible-server/how-to-deploy-on-azure-free-account.md @@ -29,7 +29,7 @@ To complete this tutorial, you need: ## Create an Azure Database for MySQL - Flexible Server -In this article, you'll use the Azure portal to create a Flexible Server with public access connectivity method. Alternatively, refer the respective quickstarts to create a Flexible Server using [Azure CLI](./quickstart-create-server-cli.md) or [ARM template](./quickstart-create-arm-template.md), or [within a VNET](./quickstart-create-connect-server-vnet.md). +In this article, you'll use the Azure portal to create a Flexible Server with public access connectivity method. Alternatively, refer to the respective quickstarts to create a Flexible Server using [Azure CLI](./quickstart-create-server-cli.md), [ARM template](./quickstart-create-arm-template.md), [Terraform](./quickstart-create-terraform.md), or [within a VNET](./quickstart-create-connect-server-vnet.md). 1. Sign in to the [Azure portal](https://portal.azure.com/) with your Azure free account. 
diff --git a/articles/mysql/flexible-server/how-to-read-replicas-cli.md b/articles/mysql/flexible-server/how-to-read-replicas-cli.md index f029867bce9a..3a15d435e415 100644 --- a/articles/mysql/flexible-server/how-to-read-replicas-cli.md +++ b/articles/mysql/flexible-server/how-to-read-replicas-cli.md @@ -62,7 +62,7 @@ az mysql flexible-server replica list --server-name mydemoserver --resource-grou Replication to a read replica server can be stopped using the following command: ```azurecli-interactive -az mysql flexible-server replica stop-replication --replica-name mydemoreplicaserver --resource-group myresourcegroup +az mysql flexible-server replica stop-replication --name mydemoreplicaserver --resource-group myresourcegroup ``` ### Delete a replica server diff --git a/articles/mysql/flexible-server/quickstart-create-arm-template.md b/articles/mysql/flexible-server/quickstart-create-arm-template.md index ed4bd8fee7dc..a873efd9ff65 100644 --- a/articles/mysql/flexible-server/quickstart-create-arm-template.md +++ b/articles/mysql/flexible-server/quickstart-create-arm-template.md @@ -12,9 +12,9 @@ ms.date: 10/23/2020 # Quickstart: Use an ARM template to create an Azure Database for MySQL - Flexible Server -[[!INCLUDE[applies-to-mysql-flexible-server](../includes/applies-to-mysql-flexible-server.md)] +[!INCLUDE [applies-to-mysql-flexible-server](../includes/applies-to-mysql-flexible-server.md)] -Azure Database for MySQL - Flexible Server is a managed service that you use to run, manage, and scale highly available MySQL databases in the cloud. You can use an Azure Resource Manager template (ARM template) to provision a flexible server to deploy multiple servers or multiple databases on a server. 
+[!INCLUDE [About Azure Database for MySQL - Flexible Server](../includes/azure-database-for-mysql-flexible-server-abstract.md)] [!INCLUDE [About Azure Resource Manager](../../../includes/resource-manager-quickstart-introduction.md)] diff --git a/articles/mysql/flexible-server/quickstart-create-server-cli.md b/articles/mysql/flexible-server/quickstart-create-server-cli.md index 335495b20d84..f84738ca5140 100644 --- a/articles/mysql/flexible-server/quickstart-create-server-cli.md +++ b/articles/mysql/flexible-server/quickstart-create-server-cli.md @@ -51,7 +51,7 @@ Create an [Azure resource group](../../azure-resource-manager/management/overvie az group create --name myresourcegroup --location eastus2 ``` -Create a flexible server with the `az mysql flexible-server create` command. A server can contain multiple databases. The following command creates a server using service defaults and values from your Azure CLI's [local context](/cli/azure/local-context): +Create a flexible server with the `az mysql flexible-server create` command. A server can contain multiple databases. 
The following command creates a server using service defaults and values from your Azure CLI's local context: ```azurecli-interactive az mysql flexible-server create diff --git a/articles/mysql/flexible-server/quickstart-create-terraform.md b/articles/mysql/flexible-server/quickstart-create-terraform.md new file mode 100644 index 000000000000..438253b71f0b --- /dev/null +++ b/articles/mysql/flexible-server/quickstart-create-terraform.md @@ -0,0 +1,128 @@ +--- +title: 'Quickstart: Use Terraform to create an Azure Database for MySQL - Flexible Server' +description: Learn how to deploy a database for Azure Database for MySQL Flexible Server using Terraform +author: tomarchermsft +ms.service: mysql +ms.subservice: flexible-server +ms.topic: quickstart +ms.custom: devx-track-terraform +ms.author: tarcher +ms.date: 5/27/2022 +--- + +# Quickstart: Use Terraform to create an Azure Database for MySQL - Flexible Server + +[!INCLUDE [applies-to-mysql-flexible-server](../includes/applies-to-mysql-flexible-server.md)] + +Article tested with the following Terraform and Terraform provider versions: + +- [Terraform v1.2.1](https://releases.hashicorp.com/terraform/) +- [AzureRM Provider v.2.99.0](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs) + +[!INCLUDE [Terraform abstract](~/azure-dev-docs-pr/articles/terraform/includes/abstract.md)] + +[!INCLUDE [About Azure Database for MySQL - Flexible Server](../includes/azure-database-for-mysql-flexible-server-abstract.md)] + +In this article, you learn how to deploy an Azure MySQL Flexible Server Database in a virtual network (VNet) using Terraform. 
+ +> [!div class="checklist"] + +> * Create an Azure resource group using [azurerm_resource_group](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/resource_group) +> * Create an Azure VNet using [azurerm_virtual_network](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/virtual_network) +> * Create an Azure subnet using [azurerm_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/subnet) +> * Define a private DNS zone within an Azure DNS using [azurerm_private_dns_zone](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/private_dns_zone) +> * Define a private DNS zone VNet link using [azurerm_private_dns_zone_virtual_network_link](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/private_dns_zone_virtual_network_link) +> * Deploy Flexible Server using [azurerm_mysql_flexible_server](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/mysql_flexible_server) +> * Deploy a database using [azurerm_mysql_flexible_database](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/mysql_flexible_database) + +> [!NOTE] +> The example code in this article is located in the [Azure Terraform GitHub repo](https://github.com/Azure/terraform/tree/master/quickstart/201-mysql-fs-db). + +## Prerequisites + +- [!INCLUDE [flexible-server-free-trial-note](../includes/flexible-server-free-trial-note.md)] + +- [Install and configure Terraform](/azure/developer/terraform/quickstart-configure) + +## Implement the Terraform code + +1. Create a directory in which to test the sample Terraform code and make it the current directory. + +1. Create a file named `providers.tf` and insert the following code: + + [!code-terraform[master](~/terraform_samples/quickstart/201-mysql-fs-db/providers.tf)] + +1.
Create a file named `main.tf` and insert the following code: + + [!code-terraform[master](../../../terraform_samples/quickstart/201-mysql-fs-db/main.tf)] + +1. Create a file named `mysql-fs-db.tf` and insert the following code: + + [!code-terraform[master](../../../terraform_samples/quickstart/201-mysql-fs-db/mysql-fs-db.tf)] + +1. Create a file named `variables.tf` and insert the following code: + + [!code-terraform[master](../../../terraform_samples/quickstart/201-mysql-fs-db/variables.tf)] + +1. Create a file named `output.tf` and insert the following code: + + [!code-terraform[master](../../../terraform_samples/quickstart/201-mysql-fs-db/output.tf)] + +## Initialize Terraform + +[!INCLUDE [terraform-init.md](~/azure-dev-docs-pr/articles/terraform/includes/terraform-init.md)] + +## Create a Terraform execution plan + +[!INCLUDE [terraform-plan.md](~/azure-dev-docs-pr/articles/terraform/includes/terraform-plan.md)] + +## Apply a Terraform execution plan + +[!INCLUDE [terraform-apply-plan.md](~/azure-dev-docs-pr/articles/terraform/includes/terraform-apply-plan.md)] + +## Verify the results + +#### [Azure CLI](#tab/azure-cli) + +Run [az mysql flexible-server db show](/cli/azure/mysql/flexible-server/db#az-mysql-flexible-server-db-show) to display the Azure MySQL database. + +```azurecli +az mysql flexible-server db show \ + --resource-group \ + --server-name \ + --database-name +``` + +**Key points:** + +- The values for the ``, ``, and `` are displayed in the `terraform apply` output. You can also run the [terraform output](https://www.terraform.io/cli/commands/output) command to view these output values. + +#### [Azure PowerShell](#tab/azure-powershell) + +Run [Get-AzMySqlFlexibleServerDatabase](/powershell/module/az.mysql/get-azmysqlflexibleserverdatabase) to display the Azure MySQL database. 
+ +```azurepowershell +Get-AzMySqlFlexibleServerDatabase ` + -ResourceGroupName ` + -ServerName ` + -Name +``` + +**Key points:** + +- The values for the ``, ``, and `` are displayed in the `terraform apply` output. You can also run the [terraform output](https://www.terraform.io/cli/commands/output) command to view these output values. + +--- + +## Clean up resources + +[!INCLUDE [terraform-plan-destroy.md](~/azure-dev-docs-pr/articles/terraform/includes/terraform-plan-destroy.md)] + +## Troubleshoot Terraform on Azure + +[Troubleshoot common problems when using Terraform on Azure](/azure/developer/terraform/troubleshoot) + +## Next steps + +> [!div class="nextstepaction"] +> [Connect Azure Database for MySQL Flexible Server with private access](/azure/mysql/flexible-server/quickstart-create-connect-server-vnet) diff --git a/articles/mysql/includes/azure-database-for-mysql-flexible-server-abstract.md b/articles/mysql/includes/azure-database-for-mysql-flexible-server-abstract.md new file mode 100644 index 000000000000..0beb4ed5c947 --- /dev/null +++ b/articles/mysql/includes/azure-database-for-mysql-flexible-server-abstract.md @@ -0,0 +1,6 @@ +--- +ms.topic: include +ms.date: 05/24/2022 +--- + +Azure Database for MySQL - Flexible Server is a managed service that you use to run, manage, and scale highly available MySQL databases in the cloud. You can use an Azure Resource Manager template (ARM template) to provision a flexible server to deploy multiple servers or multiple databases on a server. diff --git a/articles/mysql/single-server/how-to-redirection.md b/articles/mysql/single-server/how-to-redirection.md index 83e8b2bdc33c..d6131ca713b8 100644 --- a/articles/mysql/single-server/how-to-redirection.md +++ b/articles/mysql/single-server/how-to-redirection.md @@ -51,7 +51,7 @@ If you are using an older version of the mysqlnd_azure extension (version 1.0.0- |`on` or `1`|- If the connection does not use SSL on the driver side, no connection will be made. 
The following error will be returned: *"mysqlnd_azure.enableRedirect is on, but SSL option is not set in connection string. Redirection is only possible with SSL."*
    - If SSL is used on the driver side, but redirection is not supported on the server, the first connection is aborted and the following error is returned: *"Connection aborted because redirection is not enabled on the MySQL server or the network package doesn't meet redirection protocol."*
    - If the MySQL server supports redirection, but the redirected connection failed for any reason, also abort the first proxy connection. Return the error of the redirected connection.| |`preferred` or `2`
    (default value)|- mysqlnd_azure will use redirection if possible.
    - If the connection does not use SSL on the driver side, the server does not support redirection, or the redirected connection fails to connect for any non-fatal reason while the proxy connection is still a valid one, it will fall back to the first proxy connection.| -For successful connection to Azure database for MySQL Single server using `mysqlnd_azure.enableRedirect` you need to follow mandatory steps of combining your root certificate as per the compliance requirements. For more details on please visit [link](./concepts-certificate-rotation.md#do-i-need-to-make-any-changes-on-my-client-to-maintain-connectivity). +For successful connection to Azure database for MySQL Single server using `mysqlnd_azure.enableRedirect` you need to follow mandatory steps of combining your root certificate as per the compliance requirements. For more details please visit [link](./concepts-certificate-rotation.md#do-i-need-to-make-any-changes-on-my-client-to-maintain-connectivity). The subsequent sections of the document will outline how to install the `mysqlnd_azure` extension using PECL and set the value of this parameter. diff --git a/articles/network-watcher/enable-network-watcher-flow-log-settings.md b/articles/network-watcher/enable-network-watcher-flow-log-settings.md new file mode 100644 index 000000000000..c5d0168cdd4e --- /dev/null +++ b/articles/network-watcher/enable-network-watcher-flow-log-settings.md @@ -0,0 +1,78 @@ +--- +title: Enable Azure Network Watcher | Microsoft Docs +description: Learn how to enable Network Watcher. 
+services: network-watcher +documentationcenter: na +author: v-ssenthilna + +ms.service: network-watcher +ms.topic: article +ms.tgt_pltfrm: na +ms.workload: infrastructure-services +ms.date: 05/11/2022 +ms.author: v-ssenthilna +ms.custom: references_regions, devx-track-azurepowershell +--- +# Enable Azure Network Watcher + +To analyze traffic, you need to have an existing network watcher, or [enable a network watcher](network-watcher-create.md) in each region that you have NSGs that you want to analyze traffic for. Traffic analytics can be enabled for NSGs hosted in any of the [supported regions](supported-region-traffic-analytics.md). + +## Select a network security group + +Before enabling NSG flow logging, you must have a network security group to log flows for. If you don't have a network security group, see [Create a network security group](../virtual-network/manage-network-security-group.md#create-a-network-security-group) to create one. + +In Azure portal, go to **Network watcher**, and then select **NSG flow logs**. Select the network security group that you want to enable an NSG flow log for, as shown in the following picture: + +![Screenshot of portal to select N S G that require enablement of NSG flow log.](./media/traffic-analytics/selection-of-nsgs-that-require-enablement-of-nsg-flow-logging.png) + +If you try to enable traffic analytics for an NSG that is hosted in any region other than the [supported regions](supported-region-traffic-analytics.md), you receive a "Not found" error. + +## Enable flow log settings + +Before enabling flow log settings, you must complete the following tasks: + +Register the Azure Insights provider, if it's not already registered for your subscription: + +```azurepowershell-interactive +Register-AzResourceProvider -ProviderNamespace Microsoft.Insights +``` + +If you don't already have an Azure Storage account to store NSG flow logs in, you must create a storage account. 
You can create a storage account with the command that follows. Before running the command, replace `` with a name that is unique across all Azure locations, between 3-24 characters in length, using only numbers and lower-case letters. You can also change the resource group name, if necessary. + +```azurepowershell-interactive +New-AzStorageAccount ` + -Location westcentralus ` + -Name ` + -ResourceGroupName myResourceGroup ` + -SkuName Standard_LRS ` + -Kind StorageV2 +``` + +Select the following options, as shown in the picture: + +1. Select *On* for **Status** +2. Select *Version 2* for **Flow Logs version**. Version 2 contains flow-session statistics (Bytes and Packets) +3. Select an existing storage account to store the flow logs in. Ensure that your storage does not have "Data Lake Storage Gen2 Hierarchical Namespace Enabled" set to true. +4. Set **Retention** to the number of days you want to store data for. If you want to store the data forever, set the value to *0*. You incur Azure Storage fees for the storage account. +5. Select *On* for **Traffic Analytics Status**. +6. Select processing interval. Based on your choice, flow logs will be collected from storage account and processed by Traffic Analytics. You can choose processing interval of every 1 hour or every 10 mins. +7. Select an existing Log Analytics (OMS) Workspace, or select **Create New Workspace** to create a new one. A Log Analytics workspace is used by Traffic Analytics to store the aggregated and indexed data that is then used to generate the analytics. If you select an existing workspace, it must exist in one of the [supported regions](supported-region-traffic-analytics.md) and have been upgraded to the new query language. If you do not wish to upgrade an existing workspace, or do not have a workspace in a supported region, create a new one. 
For more information about query languages, see [Azure Log Analytics upgrade to new log search](../azure-monitor/logs/log-query-overview.md?toc=%2fazure%2fnetwork-watcher%2ftoc.json). + + > [!NOTE] + > The log analytics workspace hosting the traffic analytics solution and the NSGs do not have to be in the same region. For example, you may have traffic analytics in a workspace in the West Europe region, while you may have NSGs in East US and West US. Multiple NSGs can be configured in the same workspace. + +8. Select **Save**. + + ![Screenshot showing selection of storage account, Log Analytics workspace, and Traffic Analytics enablement.](./media/traffic-analytics/ta-customprocessinginterval.png) + +Repeat the previous steps for any other NSGs for which you wish to enable traffic analytics for. Data from flow logs is sent to the workspace, so ensure that the local laws and regulations in your country/region permit data storage in the region where the workspace exists. If you have set different processing intervals for different NSGs, data will be collected at different intervals. For example, You can choose to enable processing interval of 10 mins for critical VNETs and 1 hour for noncritical VNETs. + +You can also configure traffic analytics using the [Set-AzNetworkWatcherConfigFlowLog](/powershell/module/az.network/set-aznetworkwatcherconfigflowlog) PowerShell cmdlet in Azure PowerShell. Run `Get-Module -ListAvailable Az` to find your installed version. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-Az-ps). + +## View traffic analytics + +To view Traffic Analytics, search for **Network Watcher** in the portal search bar. Once inside Network Watcher, to explore traffic analytics and its capabilities, select **Traffic Analytics** from the left menu. 
+ +![Screenshot that displays how to access the Traffic Analytics dashboard.](./media/traffic-analytics/accessing-the-traffic-analytics-dashboard.png) + +The dashboard may take up to 30 minutes to appear the first time because Traffic Analytics must first aggregate enough data for it to derive meaningful insights, before it can generate any reports. \ No newline at end of file diff --git a/articles/network-watcher/index.yml b/articles/network-watcher/index.yml index 35dfdb08dade..85641348d5cd 100644 --- a/articles/network-watcher/index.yml +++ b/articles/network-watcher/index.yml @@ -73,20 +73,24 @@ landingContent: linkLists: - linkListType: concept links: - - text: Log VM network traffic + - text: NSG Flow Log overview url: network-watcher-nsg-flow-logging-overview.md - linkListType: tutorial links: - text: Log VM network traffic - url: network-watcher-nsg-flow-logging-portal.md - - linkListType: tutorial + url: network-watcher-nsg-flow-logging-portal.md + + # Card (optional) + - title: Traffic Analytics + linkLists: + - linkListType: concept links: - - text: Configure NSG flow logs - url: network-watcher-nsg-flow-logging-portal.md - - text: Analyze NSG flow logs using Traffic Analytics + - text: Traffic Analytics overview url: traffic-analytics.md - - text: Log VM network traffic - url: network-watcher-nsg-flow-logging-portal.md + - linkListType: tutorial + links: + - text: Enable Traffic Analytics + url: traffic-analytics-policy-portal.md # Card (optional) - title: Reference diff --git a/articles/network-watcher/network-watcher-monitoring-overview.md b/articles/network-watcher/network-watcher-monitoring-overview.md index df6aef747152..f2622842ca01 100644 --- a/articles/network-watcher/network-watcher-monitoring-overview.md +++ b/articles/network-watcher/network-watcher-monitoring-overview.md @@ -19,7 +19,11 @@ ms.custom: mvc # What is Azure Network Watcher? 
-Azure Network Watcher provides tools to monitor, diagnose, view metrics, and enable or disable logs for resources in an Azure virtual network. Network Watcher is designed to monitor and repair the network health of IaaS (Infrastructure-as-a-Service) products which includes Virtual Machines, Virtual Networks, Application Gateways, Load balancers, etc. Note: It is not intended for and will not work for PaaS monitoring or Web analytics. +Azure Network Watcher provides tools to monitor, diagnose, view metrics, and enable or disable logs for resources in an Azure virtual network. Network Watcher is designed to monitor and repair the network health of IaaS (Infrastructure-as-a-Service) products which includes Virtual Machines, Virtual Networks, Application Gateways, Load balancers, etc. +> [!Note] +> It is not intended for and will not work for PaaS monitoring or Web analytics. + +For information about analyzing traffic from a network security group, see [Network Security Group](network-watcher-nsg-flow-logging-overview.md) and [Traffic Analytics](traffic-analytics.md). ## Monitoring @@ -79,7 +83,7 @@ There are [limits](../azure-resource-manager/management/azure-subscription-servi The information is helpful when planning future resource deployments. -## Logs +## Network Monitoring Logs ### Analyze traffic to or from a network security group diff --git a/articles/network-watcher/supported-region-traffic-analytics.md b/articles/network-watcher/supported-region-traffic-analytics.md new file mode 100644 index 000000000000..7d2b539f5fdf --- /dev/null +++ b/articles/network-watcher/supported-region-traffic-analytics.md @@ -0,0 +1,147 @@ +--- +title: Azure Traffic Analytics supported regions | Microsoft Docs +description: This article provides the list of Traffic Analytics supported regions. 
+services: network-watcher +documentationcenter: na +author: v-ssenthilna + +ms.service: network-watcher +ms.topic: article +ms.tgt_pltfrm: na +ms.workload: infrastructure-services +ms.date: 05/11/2022 +ms.author: v-ssenthilna +ms.custom: references_regions + +--- +# Supported regions: NSG + +This article provides the list of regions supported by Traffic Analytics. You can view the list of supported regions of both NSG and Log Analytics Workspaces below. + +You can use traffic analytics for NSGs in any of the following supported regions: +:::row::: + :::column span=""::: + Australia Central + Australia East + Australia Southeast + Brazil South + Brazil Southeast + Canada Central + Canada East + Central India + Central US + China East 2 + China North + China North 2 + :::column-end::: + :::column span=""::: + East Asia + East US + East US 2 + East US 2 EUAP + France Central + Germany West Central + Japan East + Japan West + Korea Central + Korea South + North Central US + North Europe + :::column-end::: + :::column span=""::: + Norway East + South Africa North + South Central US + South India + Southeast Asia + Switzerland North + Switzerland West + UAE Central + UAE North + UK South + UK West + USGov Arizona + :::column-end::: + :::column span=""::: + USGov Texas + USGov Virginia + USNat East + USNat West + USSec East + USSec West + West Central US + West Europe + West US + West US 2 + West US 3 + :::column-end::: +:::row-end::: + +## Supported regions: Log Analytics Workspaces + +The Log Analytics workspace must exist in the following regions: +:::row::: + :::column span=""::: + Australia Central + Australia East + Australia Southeast + Brazil South + Brazil Southeast + Canada East + Canada Central + Central India + Central US + China East 2 + China North + China North 2 + :::column-end::: + :::column span=""::: + East Asia + East US + East US 2 + East US 2 EUAP + France Central + Germany West Central + Japan East + Japan West + Korea Central + Korea South + North 
Central US + North Europe + :::column-end::: + :::column span=""::: + Norway East + South Africa North + South Central US + South India + Southeast Asia + Switzerland North + Switzerland West + UAE Central + UAE North + UK South + UK West + USGov Arizona + :::column-end::: + :::column span=""::: + USGov Texas + USGov Virginia + USNat East + USNat West + USSec East + USSec West + West Central US + West Europe + West US + West US 2 + West US 3 + :::column-end::: +:::row-end::: + +> [!NOTE] +> If NSGs support a region, but the log analytics workspace does not support that region for traffic analytics as per above lists, then you can use log analytics workspace of any other supported region as a workaround. + +## Next steps + +- Learn how to [enable flow log settings](enable-network-watcher-flow-log-settings.md). +- Learn the ways to [use traffic analytics](usage-scenarios-traffic-analytics.md). \ No newline at end of file diff --git a/articles/network-watcher/toc.yml b/articles/network-watcher/toc.yml index 19ce1f801053..0c8de9b5391b 100644 --- a/articles/network-watcher/toc.yml +++ b/articles/network-watcher/toc.yml @@ -26,6 +26,18 @@ href: diagnose-communication-problem-between-networks.md - name: Log VM network traffic href: network-watcher-nsg-flow-logging-portal.md + - name: Configure NSG flow logs + items: + - name: Azure PowerShell + href: network-watcher-nsg-flow-logging-powershell.md + - name: Azure CLI + href: network-watcher-nsg-flow-logging-cli.md + - name: REST + href: network-watcher-nsg-flow-logging-rest.md + - name: Azure Resource Manager + href: network-watcher-nsg-flow-logging-azure-resource-manager.md + - name: Built-in Policy + href: nsg-flow-logs-policy-portal.md - name: Concepts items: - name: Connection Monitor @@ -44,6 +56,18 @@ href: network-watcher-troubleshoot-overview.md - name: Variable packet capture href: network-watcher-packet-capture-overview.md + - name: Traffic Analytics overview + items: + - name: Overview + href: 
traffic-analytics.md + - name: Supported regions + href: supported-region-traffic-analytics.md + - name: Network Watcher and flow log settings + href: enable-network-watcher-flow-log-settings.md + - name: Usage scenarios + href: usage-scenarios-traffic-analytics.md + - name: Frequently asked questions + href: traffic-analytics-faq.yml - name: Network security group flow logging href: network-watcher-nsg-flow-logging-overview.md - name: Network security group view @@ -120,38 +144,12 @@ href: network-watcher-intrusion-detection-open-source-tools.md - name: Visualize network traffic patterns using open source tools href: network-watcher-using-open-source-tools.md - - name: Work with network security groups + - name: Flow Log Traffic Monitoring items: - - name: Configure NSG flow logs - items: - - name: Azure PowerShell - href: network-watcher-nsg-flow-logging-powershell.md - - name: Azure CLI - href: network-watcher-nsg-flow-logging-cli.md - - name: REST - href: network-watcher-nsg-flow-logging-rest.md - - name: Azure Resource Manager - href: network-watcher-nsg-flow-logging-azure-resource-manager.md - - name: Built-in Policy - href: nsg-flow-logs-policy-portal.md - - name: Delete NSG flow log storage blobs - href: network-watcher-delete-nsg-flow-log-blobs.md + - name: Read NSG flow logs + href: network-watcher-read-nsg-flow-logs.md - name: Analyze NSG flow logs items: - - name: Read NSG flow logs - href: network-watcher-read-nsg-flow-logs.md - - name: Use traffic analytics - items: - - name: Traffic Analytics overview - href: traffic-analytics.md - - name: Frequently asked questions - href: traffic-analytics-faq.yml - - name: Built-in Policy - href: traffic-analytics-policy-portal.md - - name: Schema and Data Aggregation - href: traffic-analytics-schema.md - - name: Schema update (August 2019) - href: traffic-analytics-schema-update.md - name: Use Power BI href: network-watcher-visualize-nsg-flow-logs-power-bi.md - name: Use Elastic Stack @@ -160,6 +158,16 @@ href: 
network-watcher-nsg-grafana.md - name: Use Graylog href: network-watcher-analyze-nsg-flow-logs-graylog.md + - name: Delete NSG flow log storage blobs + href: network-watcher-delete-nsg-flow-log-blobs.md + - name: Enable traffic analytics + items: + - name: Built-in Policy + href: traffic-analytics-policy-portal.md + - name: Schema and Data Aggregation + href: traffic-analytics-schema.md + - name: Schema update (August 2019) + href: traffic-analytics-schema-update.md - name: View network security groups items: - name: Azure PowerShell diff --git a/articles/network-watcher/traffic-analytics-faq.yml b/articles/network-watcher/traffic-analytics-faq.yml index 3724d9594a8d..15020f85dce4 100644 --- a/articles/network-watcher/traffic-analytics-faq.yml +++ b/articles/network-watcher/traffic-analytics-faq.yml @@ -9,12 +9,12 @@ metadata: ms.topic: faq ms.tgt_pltfrm: na ms.workload: infrastructure-services - ms.date: 01/04/2021 + ms.date: 05/12/2022 ms.author: damendo ms.custom: devx-track-azurepowershell -title: Traffic Analytics frequently asked questions +title: Traffic Analytics - frequently asked questions summary: | - This article collects in one place many of the most frequently asked questions about traffic analytics in Azure Network Watcher. + This article provides answers to the most frequently asked questions about traffic analytics in Azure Network Watcher. [!INCLUDE [updated-for-az](../../includes/updated-for-az.md)] @@ -58,7 +58,7 @@ sections: 3. To list all the roles that are assigned to a specified user, use **Get-AzRoleAssignment -SignInName [user email] -IncludeClassicAdministrators**. - If you are not seeing any output, contact the respective subscription admin to get access to run the commands. For more details, see [Add or remove Azure role assignments using Azure PowerShell](../role-based-access-control/role-assignments-powershell.md). + If you are not seeing any output, contact the respective subscription admin to get access to run the commands. 
For more information, see [Add or remove Azure role assignments using Azure PowerShell](../role-based-access-control/role-assignments-powershell.md). - question: | Can the NSGs I enable flow logs for be in different regions than my workspace? @@ -93,7 +93,7 @@ sections: - question: | What if I am getting the status, “Failed to load,” under the NSG flow logs page? answer: | - The Microsoft.Insights provider must be registered for flow logging to work properly. If you are not sure whether the Microsoft.Insights provider is registered for your subscription, replace *xxxxx-xxxxx-xxxxxx-xxxx* in the following command, and run the following commands from PowerShell: + The Microsoft.Insights  provider must be registered for flow logging to work properly. If you are not sure whether the Microsoft.Insights provider is registered for your subscription, replace *xxxxx-xxxxx-xxxxxx-xxxx* in the following command, and run the following commands from PowerShell: ```powershell-interactive **Select-AzSubscription** -SubscriptionId xxxxx-xxxxx-xxxxxx-xxxx @@ -125,7 +125,7 @@ sections: If problems persist, raise concerns in the [User voice forum](https://feedback.azure.com/d365community/forum/8ae9bf04-8326-ec11-b6e6-000d3a4f0789?c=cd276b66-8326-ec11-b6e6-000d3a4f0789). - question: | - What if I get this message: “Looks like we have resources data (Topology) and no flows information. Meanwhile, click here to see resources data and refer to FAQs for further information.”? + What if I get this message: “Looks like we have resources data (Topology) and no flows information. For more information, click here to see resources data and refer to FAQs.”? answer: | You are seeing the resources information on the dashboard; however, no flow-related statistics are present. Data might not be present because of no communication flows between the resources. Wait for 60 minutes, and recheck status. 
If the problem persists, and you're sure that communication flows among resources exist, raise concerns in the [User voice forum](https://feedback.azure.com/d365community/forum/8ae9bf04-8326-ec11-b6e6-000d3a4f0789?c=cd276b66-8326-ec11-b6e6-000d3a4f0789). @@ -155,7 +155,7 @@ sections: { 'storageId': '${TAstorageId}', 'enabled': '', - 'retentionPolicy' : + 'retentionPolicy': { days: , enabled: @@ -165,7 +165,7 @@ sections: { 'networkWatcherFlowAnalyticsConfiguration': { - 'enabled':, + 'enabled': 'workspaceId':'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'workspaceRegion':'', 'workspaceResourceId':'/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/' @@ -216,14 +216,14 @@ sections: - question: | How does Traffic Analytics decide that an IP is malicious? answer: | - Traffic Analytics relies on Microsoft internal threat intelligence systems to deem an IP as malicious. These systems leverage diverse telemetry sources like Microsoft products and services,the Microsoft Digital Crimes Unit (DCU), the Microsoft Security Response Center (MSRC), and external feeds and build a lot of intelligence on top of it. + Traffic Analytics relies on Microsoft internal threat intelligence systems to deem an IP as malicious. These systems leverage diverse telemetry sources like Microsoft products and services, the Microsoft Digital Crimes Unit (DCU), the Microsoft Security Response Center (MSRC), and external feeds and build a lot of intelligence on top of it. Some of this data is Microsoft Internal. If a known IP is getting flagged as malicious, please raise a support ticket to know the details. - question: | How can I set alerts on Traffic Analytics data? answer: | Traffic Analytics does not have inbuilt support for alerts. However, since Traffic Analytics data is stored in Log Analytics you can write custom queries and set alerts on them. - Steps : + Steps: - You can use the shortlink for Log Analytics in Traffic Analytics. 
- Use the [schema documented here](traffic-analytics-schema.md) to write your queries - Click "New alert rule" to create the alert @@ -239,7 +239,7 @@ sections: | mvexpand vm = pack_array(VM1_s, VM2_s) to typeof(string) | where isnotempty(vm) | extend traffic = AllowedInFlows_d + DeniedInFlows_d + AllowedOutFlows_d + DeniedOutFlows_d // For bytes use: | extend traffic = InboundBytes_d + OutboundBytes_d - | make-series TotalTraffic = sum(traffic) default = 0 on FlowStartTime_t from datetime(